##// END OF EJS Templates
manifest: remove manifest.add and add memmfctx.write...
Durham Goode -
r30345:fa54f7ad default
parent child Browse files
Show More
@@ -1,2000 +1,2004 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
60 60 release = lockmod.release
61 61 urlerr = util.urlerr
62 62 urlreq = util.urlreq
63 63
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # class-level access returns the descriptor itself
        if repo is None:
            return self
        # always redirect to the unfiltered repo so that every filtered
        # view shares a single cache entry per property
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to the store directory, not .hg/
        return obj.sjoin(fname)
81 81
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo: compute and cache there
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: read (computing if needed)
        # through the attribute on the unfiltered repo
        return getattr(unfi, self.name)
90 90
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store straight into the instance dict, bypassing any
        # __setattr__ override on the repo
        object.__setattr__(obj, self.name, value)
96 96
97 97
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>

    A cached property lives in the unfiltered repo's instance dictionary,
    so membership there means the cache is already populated.
    """
    unfi = repo.unfiltered()
    return name in vars(unfi)
101 101
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def inner(repo, *args, **kwargs):
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return inner
107 107
# wire-protocol capabilities advertised by modern local peers
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
# legacy peers additionally support changegroupsubset
legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        peer.peerrepository.__init__(self)
        # expose the 'served' view so the peer never leaks filtered csets
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand back its underlying repository object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        # Returns either a bundle2 unbundler (when the caller advertises
        # HG20) or a cg1 unbundler built on the same chunk stream.
        chunks = exchange.getbundlechunks(self._repo, source, heads=heads,
                                          common=common, bundlecaps=bundlecaps,
                                          **kwargs)
        cb = util.chunkbuffer(chunks)

        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            return bundle2.getunbundler(self.ui, cb)
        else:
            return changegroup.getunbundler('01', cb, None)

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            # translate a push race into the wire-level error callers expect
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
218 218
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # advertise the legacy capability set (adds 'changegroupsubset')
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
237 237
class localrepository(object):
    """A repository backed by a local .hg directory."""

    # on-disk format features this class understands (recorded in the
    # requirements written by _writerequirements)
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    # the full set of requirements this class can open
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that translate into store-vfs opener options
    # (see _applyopenerreqs)
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # name of the repoview filter; None means unfiltered
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
250 250
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path.

        baseui: ui object to copy configuration from; path: working
        directory root. Raises error.RepoError when the repository does
        not exist (or, with create=True, already exists).
        """
        self.requirements = set()
        # wvfs/wopener: access to the working directory
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        # vfs/opener: access to the .hg directory
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a missing .hg/hgrc is fine; extensions simply are not loaded
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # only run hooks registered by modules of loaded extensions
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                        self.vfs, self.supported)
            except IOError as inst:
                # no requires file means an old (pre-requirements) repo
                if inst.errno != errno.ENOENT:
                    raise

        # honor .hg/sharedpath if present (share extension)
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
                self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
363 363
    def close(self):
        # flush in-memory caches to disk before the repo goes away
        self._writecaches()
366 366
    def _writecaches(self):
        """Write dirty in-memory caches (rev-branch cache) out to disk."""
        if self._revbranchcache:
            self._revbranchcache.write()
370 370
    def _restrictcapabilities(self, caps):
        """Return the capability set peers of this repo should advertise,
        adding the bundle2 capability blob unless disabled by config."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
377 377
    def _applyopenerreqs(self):
        """Derive store-vfs options from requirements and configuration.

        The resulting self.svfs.options dict is read by code opening
        revlogs through this vfs.
        """
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
                                                   'aggressivemergedeltas',
                                                   False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
398 398
    def _writerequirements(self):
        # persist self.requirements so future opens know what they need
        scmutil.writerequires(self.vfs, self.requirements)
401 401
402 402 def _checknested(self, path):
403 403 """Determine if path is a legal nested repository."""
404 404 if not path.startswith(self.root):
405 405 return False
406 406 subpath = path[len(self.root) + 1:]
407 407 normsubpath = util.pconvert(subpath)
408 408
409 409 # XXX: Checking against the current working copy is wrong in
410 410 # the sense that it can reject things like
411 411 #
412 412 # $ hg cat -r 10 sub/x.txt
413 413 #
414 414 # if sub/ is no longer a subrepository in the working copy
415 415 # parent revision.
416 416 #
417 417 # However, it can of course also allow things that would have
418 418 # been rejected before, such as the above cat command if sub/
419 419 # is a subrepository now, but was a normal directory before.
420 420 # The old path auditor would have rejected by mistake since it
421 421 # panics when it sees sub/.hg/.
422 422 #
423 423 # All in all, checking against the working copy seems sensible
424 424 # since we want to prevent access to nested repositories on
425 425 # the filesystem *now*.
426 426 ctx = self[None]
427 427 parts = util.splitpath(subpath)
428 428 while parts:
429 429 prefix = '/'.join(parts)
430 430 if prefix in ctx.substate:
431 431 if prefix == normsubpath:
432 432 return True
433 433 else:
434 434 sub = ctx.sub(prefix)
435 435 return sub.checknested(subpath[len(prefix) + 1:])
436 436 else:
437 437 parts.pop()
438 438 return False
439 439
    def peer(self):
        """Return a localpeer wrapping this repository."""
        return localpeer(self) # not cached to avoid reference cycle
442 442
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
448 448
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
456 456
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # bookmark store, reloaded when either backing file changes
        return bookmarks.bmstore(self)
460 460
    @property
    def _activebookmark(self):
        # delegates to the bookmark store's 'active' attribute
        return self._bookmarks.active
464 464
465 465 def bookmarkheads(self, bookmark):
466 466 name = bookmark.split('@', 1)[0]
467 467 heads = []
468 468 for mark, n in self._bookmarks.iteritems():
469 469 if mark.split('@', 1)[0] == name:
470 470 heads.append(n)
471 471 return heads
472 472
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        # phase data, reloaded when either backing store file changes
        return phases.phasecache(self, self._phasedefaults)
479 479
    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store (read-only when marker creation
        is not enabled)."""
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # markers exist but the feature is off: warn, don't fail
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
497 497
498 498 @storecache('00changelog.i')
499 499 def changelog(self):
500 500 c = changelog.changelog(self.svfs)
501 501 if 'HG_PENDING' in os.environ:
502 502 p = os.environ['HG_PENDING']
503 503 if p.startswith(self.root):
504 504 c.readpending('00changelog.i.a')
505 505 return c
506 506
    @property
    def manifest(self):
        # legacy accessor kept during the manifest -> manifestlog
        # migration (see _constructmanifest)
        return self.manifestlog._oldmanifest
510 510
    def _constructmanifest(self):
        # This is a temporary function while we migrate from manifest to
        # manifestlog. It allows bundlerepo and unionrepo to intercept the
        # manifest creation.
        return manifest.manifest(self.svfs)
516 516
    @storecache('00manifest.i')
    def manifestlog(self):
        # cached manifest accessor, reloaded when 00manifest.i changes
        return manifest.manifestlog(self.svfs, self)
520 520
    @repofilecache('dirstate')
    def dirstate(self):
        """The working directory state; parents are validated against the
        changelog via _dirstatevalidate."""
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)
525 525
    def _dirstatevalidate(self, node):
        """Return node if the changelog knows it, else nullid.

        Warns once per repo instance when the recorded working directory
        parent is unknown.
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
536 536
    def __getitem__(self, changeid):
        """repo[changeid] -> changectx (workingctx for None/wdirrev).

        A slice yields a list of changectxs, skipping filtered revisions.
        """
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
545 545
    def __contains__(self, changeid):
        """True if changeid resolves to a changeset of this repository."""
        try:
            self[changeid]
            return True
        except error.RepoLookupError:
            return False
552 552
    def __nonzero__(self):
        # a repo object is always truthy, even when empty
        return True
555 555
    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)
558 558
    def __iter__(self):
        # iterate over revision numbers
        return iter(self.changelog)
561 561
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
577 577
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
589 589
    def url(self):
        # local repositories are addressed with a 'file:' URL
        return 'file:' + self.root
592 592
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
601 601
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Record tag(s) 'names' for node, locally or in a committed
        .hgtags change.

        Worker for tag(); see that method for argument semantics. Runs
        the 'pretag' hook (which may abort) before writing and the 'tag'
        hook for each name afterwards. Returns the node of the tagging
        commit, or None for local tags.
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines, making sure we start on a fresh line
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    # a tag of this name already exists: record its old
                    # node first so the previous binding is kept on file
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
675 675
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to overwrite uncommitted .hgtags edits
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
705 705
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # derived caches, filled lazily by nodetags()/tagslist()
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
728 728
729 729 def tags(self):
730 730 '''return a mapping of tag to node'''
731 731 t = {}
732 732 if self.changelog.filteredrevs:
733 733 tags, tt = self._findtags()
734 734 else:
735 735 tags = self._tagscache.tags
736 736 for k, v in tags.iteritems():
737 737 try:
738 738 # ignore tags to unknown nodes
739 739 self.changelog.rev(v)
740 740 t[k] = v
741 741 except (error.LookupError, ValueError):
742 742 pass
743 743 return t
744 744
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
777 777
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
788 788
789 789 def tagslist(self):
790 790 '''return a list of tags ordered by revision'''
791 791 if not self._tagscache.tagslist:
792 792 l = []
793 793 for t, n in self.tags().iteritems():
794 794 l.append((self.changelog.rev(n), t, n))
795 795 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
796 796
797 797 return self._tagscache.tagslist
798 798
799 799 def nodetags(self, node):
800 800 '''return the tags associated with a node'''
801 801 if not self._tagscache.nodetagscache:
802 802 nodetagscache = {}
803 803 for t, n in self._tagscache.tags.iteritems():
804 804 nodetagscache.setdefault(n, []).append(t)
805 805 for tags in nodetagscache.itervalues():
806 806 tags.sort()
807 807 self._tagscache.nodetagscache = nodetagscache
808 808 return self._tagscache.nodetagscache.get(node, [])
809 809
810 810 def nodebookmarks(self, node):
811 811 """return the list of bookmarks pointing to the specified node"""
812 812 marks = []
813 813 for bookmark, n in self._bookmarks.iteritems():
814 814 if n == node:
815 815 marks.append(bookmark)
816 816 return sorted(marks)
817 817
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # updatecache fills self._branchcaches for our filter level
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
823 823
    @unfilteredmethod
    def revbranchcache(self):
        """Return the rev->branch cache, created lazily and shared by all
        views through the unfiltered repo."""
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
829 829
830 830 def branchtip(self, branch, ignoremissing=False):
831 831 '''return the tip node for a given branch
832 832
833 833 If ignoremissing is True, then this method will not raise an error.
834 834 This is helpful for callers that only expect None for a missing branch
835 835 (e.g. namespace).
836 836
837 837 '''
838 838 try:
839 839 return self.branchmap().branchtip(branch)
840 840 except KeyError:
841 841 if not ignoremissing:
842 842 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
843 843 else:
844 844 pass
845 845
    def lookup(self, key):
        # resolve key through self[key] and return the binary node
        return self[key].node()
848 848
849 849 def lookupbranch(self, key, remote=None):
850 850 repo = remote or self
851 851 if key in repo.branchmap():
852 852 return key
853 853
854 854 repo = (remote and remote.local()) and remote or self
855 855 return repo[key].branch()
856 856
857 857 def known(self, nodes):
858 858 cl = self.changelog
859 859 nm = cl.nodemap
860 860 filtered = cl.filteredrevs
861 861 result = []
862 862 for n in nodes:
863 863 r = nm.get(n)
864 864 resp = not (r is None or r in filtered)
865 865 result.append(resp)
866 866 return result
867 867
    def local(self):
        # a local repository is its own "local" object (peer API)
        return self
870 870
    def publishing(self):
        """True if this repository publishes changesets on push."""
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
875 875
    def cancopy(self):
        """True when this repo can be cloned by copying the store."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
884 884
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
890 890
    def join(self, f, *insidef):
        # path of f (and further components) inside .hg/
        return self.vfs.join(os.path.join(f, *insidef))
893 893
    def wjoin(self, f, *insidef):
        # path of f (and further components) inside the working directory
        return self.vfs.reljoin(self.root, f, *insidef)
896 896
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
901 901
    def changectx(self, changeid):
        # alias for self[changeid]
        return self[changeid]
904 904
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents to p1/p2, adjusting copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # drop copy records that no longer make sense without a
            # second parent
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
921 921
    def filectx(self, path, changeid=None, fileid=None):
        """Return a file context for ``path``.

        changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
926 926
    def getcwd(self):
        # delegate to the dirstate, which tracks the working directory
        return self.dirstate.getcwd()
929 929
    def pathto(self, f, cwd=None):
        # delegate path formatting to the dirstate
        return self.dirstate.pathto(f, cwd)
932 932
    def wfile(self, f, mode='r'):
        """Open file ``f`` in the working directory with the given mode."""
        return self.wvfs(f, mode)
935 935
    def _link(self, f):
        # True if ``f`` is a symbolic link in the working directory
        return self.wvfs.islink(f)
938 938
939 939 def _loadfilter(self, filter):
940 940 if filter not in self.filterpats:
941 941 l = []
942 942 for pat, cmd in self.ui.configitems(filter):
943 943 if cmd == '!':
944 944 continue
945 945 mf = matchmod.match(self.root, '', [pat])
946 946 fn = None
947 947 params = cmd
948 948 for name, filterfn in self._datafilters.iteritems():
949 949 if cmd.startswith(name):
950 950 fn = filterfn
951 951 params = cmd[len(name):].lstrip()
952 952 break
953 953 if not fn:
954 954 fn = lambda s, c, **kwargs: util.filter(s, c)
955 955 # Wrap old filters not supporting keyword arguments
956 956 if not inspect.getargspec(fn)[2]:
957 957 oldfn = fn
958 958 fn = lambda s, c, **kwargs: oldfn(s, c)
959 959 l.append((mf, fn, params))
960 960 self.filterpats[filter] = l
961 961 return self.filterpats[filter]
962 962
963 963 def _filter(self, filterpats, filename, data):
964 964 for mf, fn, cmd in filterpats:
965 965 if mf(filename):
966 966 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
967 967 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
968 968 break
969 969
970 970 return data
971 971
    @unfilteredpropertycache
    def _encodefilterpats(self):
        # cached [encode] filter patterns (working dir -> repository)
        return self._loadfilter('encode')
975 975
    @unfilteredpropertycache
    def _decodefilterpats(self):
        # cached [decode] filter patterns (repository -> working dir)
        return self._loadfilter('decode')
979 979
    def adddatafilter(self, name, filter):
        # register a named in-process data filter; _loadfilter() matches
        # configured filter commands against these names
        self._datafilters[name] = filter
982 982
983 983 def wread(self, filename):
984 984 if self._link(filename):
985 985 data = self.wvfs.readlink(filename)
986 986 else:
987 987 data = self.wvfs.read(filename)
988 988 return self._filter(self._encodefilterpats, filename, data)
989 989
990 990 def wwrite(self, filename, data, flags, backgroundclose=False):
991 991 """write ``data`` into ``filename`` in the working directory
992 992
993 993 This returns length of written (maybe decoded) data.
994 994 """
995 995 data = self._filter(self._decodefilterpats, filename, data)
996 996 if 'l' in flags:
997 997 self.wvfs.symlink(data, filename)
998 998 else:
999 999 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
1000 1000 if 'x' in flags:
1001 1001 self.wvfs.setflags(filename, False, True)
1002 1002 return len(data)
1003 1003
    def wwritedata(self, filename, data):
        """Run ``data`` through the decode filters for ``filename``."""
        return self._filter(self._decodefilterpats, filename, data)
1006 1006
1007 1007 def currenttransaction(self):
1008 1008 """return the current transaction or None if non exists"""
1009 1009 if self._transref:
1010 1010 tr = self._transref()
1011 1011 else:
1012 1012 tr = None
1013 1013
1014 1014 if tr and tr.running():
1015 1015 return tr
1016 1016 return None
1017 1017
    def transaction(self, desc, report=None):
        """Open a transaction, or nest into the currently-running one.

        ``desc`` names the transaction for hooks; ``report`` optionally
        replaces ui.warn as the journal message writer. The store lock
        must already be held.
        """
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # generate a (sufficiently) unique id for this transaction's hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1105 1105
1106 1106 def _journalfiles(self):
1107 1107 return ((self.svfs, 'journal'),
1108 1108 (self.vfs, 'journal.dirstate'),
1109 1109 (self.vfs, 'journal.branch'),
1110 1110 (self.vfs, 'journal.desc'),
1111 1111 (self.vfs, 'journal.bookmarks'),
1112 1112 (self.svfs, 'journal.phaseroots'))
1113 1113
    def undofiles(self):
        # map each journal file to its post-transaction "undo" counterpart
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1116 1116
    def _writejournal(self, desc):
        """Snapshot non-store state into journal.* files for rollback.

        Covers the dirstate, current branch, a length/description record,
        bookmarks and phase roots."""
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1127 1127
    def recover(self):
        """Roll back an interrupted transaction, if one is found.

        Returns True when a journal existed and was rolled back,
        False otherwise."""
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1141 1141
    def rollback(self, dryrun=False, force=False):
        """Undo the last completed transaction.

        Returns the result of _rollback() (0 on success) or 1 when no
        rollback information is available."""
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so an interrupted rollback can restore it
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1156 1156
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Implementation of rollback(): restore store and non-store state.

        Returns 0 on success. ``dsguard`` protects the dirstate backup and
        is closed only when the rolled-back parents no longer exist.
        """
        ui = self.ui
        try:
            # undo.desc records: old changelog length, description, detail
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # only restore the working-directory state if its parents were
        # stripped by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1227 1227
    def invalidatecaches(self):
        """Discard in-memory caches: tags, branch caches, volatile sets."""

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1236 1236
    def invalidatevolatilesets(self):
        # drop caches derived from repo filtering and obsolescence markers
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1240 1240
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different to dirstate.invalidate() that it doesn't always
        rereads the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # first drop the dirstate's own cached file state...
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            # ...then drop the cached dirstate object itself
            delattr(self.unfiltered(), 'dirstate')
1257 1257
    def invalidate(self, clearfilecache=False):
        '''Invalidates both store and non-store parts other than dirstate

        If a transaction is running, invalidation of store is omitted,
        because discarding in-memory changes might cause inconsistency
        (e.g. incomplete fncache causes unintentional failure, but
        redundant one doesn't).
        '''
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        # NOTE: .keys() snapshots the keys (Python 2 semantics here), so
        # entries may be deleted from _filecache while iterating
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        if not self.currenttransaction():
            # TODO: Changing contents of store outside transaction
            # causes inconsistency. We should make in-memory store
            # changes detectable, and abort if changed.
            self.store.invalidatecaches()
1284 1284
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        self.invalidate()
        self.invalidatedirstate()
1291 1291
    @unfilteredmethod
    def _refreshfilecachestats(self, tr):
        """Reload stats of cached files so that they are flagged as valid"""
        for k, ce in self._filecache.items():
            # skip dirstate (managed separately) and entries not currently
            # loaded in memory
            if k == 'dirstate' or k not in self.__dict__:
                continue
            ce.refresh()
1299 1299
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` on ``vfs`` and return the lock object.

        First tries a non-blocking acquisition; if the lock is held and
        ``wait`` is true, warns the user who holds it and retries with a
        timeout taken from ui.timeout (default 600 seconds).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            # show more details for new-style locks
            if ':' in inst.locker:
                host, pid = inst.locker.split(":", 1)
                self.ui.warn(
                    _("waiting for lock on %s held by process %r "
                      "on host %r\n") % (desc, pid, host))
            else:
                self.ui.warn(_("waiting for lock on %s held by %r\n") %
                             (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1331 1331
1332 1332 def _afterlock(self, callback):
1333 1333 """add a callback to be run when the repository is fully unlocked
1334 1334
1335 1335 The callback will be executed when the outermost lock is released
1336 1336 (with wlock being higher level than 'lock')."""
1337 1337 for ref in (self._wlockref, self._lockref):
1338 1338 l = ref and ref()
1339 1339 if l and l.held:
1340 1340 l.postrelease.append(callback)
1341 1341 break
1342 1342 else: # no lock have been found.
1343 1343 callback()
1344 1344
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._currentlock(self._lockref)
        if l is not None:
            # re-entrant acquisition: bump the existing lock's count
            l.lock()
            return l

        l = self._lock(self.svfs, "lock", wait, None,
                       self.invalidate, _('repository %s') % self.origroot)
        # keep only a weak reference to avoid a repo<->lock cycle
        self._lockref = weakref.ref(l)
        return l
1361 1361
    def _wlockchecktransaction(self):
        # used as inheritchecker for the wlock: inheriting it while a
        # transaction is running is forbidden
        if self.currenttransaction() is not None:
            raise error.LockInheritanceContractViolation(
                'wlock cannot be inherited in the middle of a transaction')
1366 1366
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant acquisition: bump the existing lock's count
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # flush dirstate changes on release, unless a parent change is
            # pending, in which case the in-memory state is discarded
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1402 1402
1403 1403 def _currentlock(self, lockref):
1404 1404 """Returns the lock if it's held, or None if it's not."""
1405 1405 if lockref is None:
1406 1406 return None
1407 1407 l = lockref()
1408 1408 if l is None or not l.held:
1409 1409 return None
1410 1410 return l
1411 1411
    def currentwlock(self):
        """Returns the wlock if it's held, or None if it's not."""
        # delegate the liveness/held checks to _currentlock()
        return self._currentlock(self._wlockref)
1415 1415
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filelog node for the file's new revision (or a reused
        parent node); appends the file name to ``changelist`` when a new
        revision was recorded or its flags changed.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the context already points at a filelog revision; reuse it
            # when it matches one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3 rev1 changes file foo
            # \ / rev2 renames foo to bar and changes it
            # \- 2 -/ rev3 should have bar with all changes and
            # should record that bar descends from
            # bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3 rev4 reverts the content change from rev2
            # \ / merging rev3 and rev4 should use bar@rev2
            # \- 2 --- 4 as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1503 1503
    def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
        """check for commit arguments that aren't committable

        Calls ``fail(filename, message)`` for each explicitly named file
        or directory that cannot be committed (deleted, unmatched
        directory, or untracked).
        """
        if match.isexact() or match.prefix():
            matched = set(status.modified + status.added + status.removed)

            for f in match.files():
                f = self.dirstate.normalize(f)
                if f == '.' or f in matched or f in wctx.substate:
                    continue
                if f in status.deleted:
                    fail(f, _('file not found!'))
                if f in vdirs: # visited directory
                    d = f + '/'
                    for mf in matched:
                        if mf.startswith(d):
                            break
                    else:
                        fail(f, _("no match under directory!"))
                elif f not in self.dirstate:
                    fail(f, _("file not tracked!"))
1524 1524
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there is nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            # record explicitly matched directories so they can be
            # validated in checkcommitpatterns() below
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                   '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)
            cmdutil.checkunresolved(ms)

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook). Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                        subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1686 1686
1687 1687 @unfilteredmethod
1688 1688 def commitctx(self, ctx, error=False):
1689 1689 """Add a new revision to current repository.
1690 1690 Revision information is passed via the context argument.
1691 1691 """
1692 1692
1693 1693 tr = None
1694 1694 p1, p2 = ctx.p1(), ctx.p2()
1695 1695 user = ctx.user()
1696 1696
1697 1697 lock = self.lock()
1698 1698 try:
1699 1699 tr = self.transaction("commit")
1700 1700 trp = weakref.proxy(tr)
1701 1701
1702 1702 if ctx.files():
1703 m1 = p1.manifest()
1704 m2 = p2.manifest()
1705 m = m1.copy()
1703 m1ctx = p1.manifestctx()
1704 m2ctx = p2.manifestctx()
1705 mctx = m1ctx.copy()
1706
1707 m = mctx.read()
1708 m1 = m1ctx.read()
1709 m2 = m2ctx.read()
1706 1710
1707 1711 # check in files
1708 1712 added = []
1709 1713 changed = []
1710 1714 removed = list(ctx.removed())
1711 1715 linkrev = len(self)
1712 1716 self.ui.note(_("committing files:\n"))
1713 1717 for f in sorted(ctx.modified() + ctx.added()):
1714 1718 self.ui.note(f + "\n")
1715 1719 try:
1716 1720 fctx = ctx[f]
1717 1721 if fctx is None:
1718 1722 removed.append(f)
1719 1723 else:
1720 1724 added.append(f)
1721 1725 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1722 1726 trp, changed)
1723 1727 m.setflag(f, fctx.flags())
1724 1728 except OSError as inst:
1725 1729 self.ui.warn(_("trouble committing %s!\n") % f)
1726 1730 raise
1727 1731 except IOError as inst:
1728 1732 errcode = getattr(inst, 'errno', errno.ENOENT)
1729 1733 if error or errcode and errcode != errno.ENOENT:
1730 1734 self.ui.warn(_("trouble committing %s!\n") % f)
1731 1735 raise
1732 1736
1733 1737 # update manifest
1734 1738 self.ui.note(_("committing manifest\n"))
1735 1739 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1736 1740 drop = [f for f in removed if f in m]
1737 1741 for f in drop:
1738 1742 del m[f]
1739 mn = self.manifestlog.add(m, trp, linkrev,
1740 p1.manifestnode(), p2.manifestnode(),
1741 added, drop)
1743 mn = mctx.write(trp, linkrev,
1744 p1.manifestnode(), p2.manifestnode(),
1745 added, drop)
1742 1746 files = changed + removed
1743 1747 else:
1744 1748 mn = p1.manifestnode()
1745 1749 files = []
1746 1750
1747 1751 # update changelog
1748 1752 self.ui.note(_("committing changelog\n"))
1749 1753 self.changelog.delayupdate(tr)
1750 1754 n = self.changelog.add(mn, files, ctx.description(),
1751 1755 trp, p1.node(), p2.node(),
1752 1756 user, ctx.date(), ctx.extra().copy())
1753 1757 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1754 1758 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1755 1759 parent2=xp2)
1756 1760 # set the new commit is proper phase
1757 1761 targetphase = subrepo.newcommitphase(self.ui, ctx)
1758 1762 if targetphase:
1759 1763 # retract boundary do not alter parent changeset.
1760 1764 # if a parent have higher the resulting phase will
1761 1765 # be compliant anyway
1762 1766 #
1763 1767 # if minimal phase was 0 we don't need to retract anything
1764 1768 phases.retractboundary(self, tr, targetphase, [n])
1765 1769 tr.close()
1766 1770 branchmap.updatecache(self.filtered('served'))
1767 1771 return n
1768 1772 finally:
1769 1773 if tr:
1770 1774 tr.release()
1771 1775 lock.release()
1772 1776
1773 1777 @unfilteredmethod
1774 1778 def destroying(self):
1775 1779 '''Inform the repository that nodes are about to be destroyed.
1776 1780 Intended for use by strip and rollback, so there's a common
1777 1781 place for anything that has to be done before destroying history.
1778 1782
1779 1783 This is mostly useful for saving state that is in memory and waiting
1780 1784 to be flushed when the current lock is released. Because a call to
1781 1785 destroyed is imminent, the repo will be invalidated causing those
1782 1786 changes to stay in memory (waiting for the next unlock), or vanish
1783 1787 completely.
1784 1788 '''
1785 1789 # When using the same lock to commit and strip, the phasecache is left
1786 1790 # dirty after committing. Then when we strip, the repo is invalidated,
1787 1791 # causing those changes to disappear.
1788 1792 if '_phasecache' in vars(self):
1789 1793 self._phasecache.write()
1790 1794
1791 1795 @unfilteredmethod
1792 1796 def destroyed(self):
1793 1797 '''Inform the repository that nodes have been destroyed.
1794 1798 Intended for use by strip and rollback, so there's a common
1795 1799 place for anything that has to be done after destroying history.
1796 1800 '''
1797 1801 # When one tries to:
1798 1802 # 1) destroy nodes thus calling this method (e.g. strip)
1799 1803 # 2) use phasecache somewhere (e.g. commit)
1800 1804 #
1801 1805 # then 2) will fail because the phasecache contains nodes that were
1802 1806 # removed. We can either remove phasecache from the filecache,
1803 1807 # causing it to reload next time it is accessed, or simply filter
1804 1808 # the removed nodes now and write the updated cache.
1805 1809 self._phasecache.filterunknown(self)
1806 1810 self._phasecache.write()
1807 1811
1808 1812 # update the 'served' branch cache to help read only server process
1809 1813 # Thanks to branchcache collaboration this is done from the nearest
1810 1814 # filtered subset and it is expected to be fast.
1811 1815 branchmap.updatecache(self.filtered('served'))
1812 1816
1813 1817 # Ensure the persistent tag cache is updated. Doing it now
1814 1818 # means that the tag cache only has to worry about destroyed
1815 1819 # heads immediately after a strip/rollback. That in turn
1816 1820 # guarantees that "cachetip == currenttip" (comparing both rev
1817 1821 # and node) always means no nodes have been added or destroyed.
1818 1822
1819 1823 # XXX this is suboptimal when qrefresh'ing: we strip the current
1820 1824 # head, refresh the tag cache, then immediately add a new head.
1821 1825 # But I think doing it this way is necessary for the "instant
1822 1826 # tag cache retrieval" case to work.
1823 1827 self.invalidate()
1824 1828
1825 1829 def walk(self, match, node=None):
1826 1830 '''
1827 1831 walk recursively through the directory tree or a given
1828 1832 changeset, finding all files matched by the match
1829 1833 function
1830 1834 '''
1831 1835 return self[node].walk(match)
1832 1836
1833 1837 def status(self, node1='.', node2=None, match=None,
1834 1838 ignored=False, clean=False, unknown=False,
1835 1839 listsubrepos=False):
1836 1840 '''a convenience method that calls node1.status(node2)'''
1837 1841 return self[node1].status(node2, match, ignored, clean, unknown,
1838 1842 listsubrepos)
1839 1843
1840 1844 def heads(self, start=None):
1841 1845 heads = self.changelog.heads(start)
1842 1846 # sort the output in rev descending order
1843 1847 return sorted(heads, key=self.changelog.rev, reverse=True)
1844 1848
1845 1849 def branchheads(self, branch=None, start=None, closed=False):
1846 1850 '''return a (possibly filtered) list of heads for the given branch
1847 1851
1848 1852 Heads are returned in topological order, from newest to oldest.
1849 1853 If branch is None, use the dirstate branch.
1850 1854 If start is not None, return only heads reachable from start.
1851 1855 If closed is True, return heads that are marked as closed as well.
1852 1856 '''
1853 1857 if branch is None:
1854 1858 branch = self[None].branch()
1855 1859 branches = self.branchmap()
1856 1860 if branch not in branches:
1857 1861 return []
1858 1862 # the cache returns heads ordered lowest to highest
1859 1863 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1860 1864 if start is not None:
1861 1865 # filter out the heads that cannot be reached from startrev
1862 1866 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1863 1867 bheads = [h for h in bheads if h in fbheads]
1864 1868 return bheads
1865 1869
1866 1870 def branches(self, nodes):
1867 1871 if not nodes:
1868 1872 nodes = [self.changelog.tip()]
1869 1873 b = []
1870 1874 for n in nodes:
1871 1875 t = n
1872 1876 while True:
1873 1877 p = self.changelog.parents(n)
1874 1878 if p[1] != nullid or p[0] == nullid:
1875 1879 b.append((t, n, p[0], p[1]))
1876 1880 break
1877 1881 n = p[0]
1878 1882 return b
1879 1883
1880 1884 def between(self, pairs):
1881 1885 r = []
1882 1886
1883 1887 for top, bottom in pairs:
1884 1888 n, l, i = top, [], 0
1885 1889 f = 1
1886 1890
1887 1891 while n != bottom and n != nullid:
1888 1892 p = self.changelog.parents(n)[0]
1889 1893 if i == f:
1890 1894 l.append(n)
1891 1895 f = f * 2
1892 1896 n = p
1893 1897 i += 1
1894 1898
1895 1899 r.append(l)
1896 1900
1897 1901 return r
1898 1902
1899 1903 def checkpush(self, pushop):
1900 1904 """Extensions can override this function if additional checks have
1901 1905 to be performed before pushing, or call it if they override push
1902 1906 command.
1903 1907 """
1904 1908 pass
1905 1909
1906 1910 @unfilteredpropertycache
1907 1911 def prepushoutgoinghooks(self):
1908 1912 """Return util.hooks consists of a pushop with repo, remote, outgoing
1909 1913 methods, which are called before pushing changesets.
1910 1914 """
1911 1915 return util.hooks()
1912 1916
1913 1917 def pushkey(self, namespace, key, old, new):
1914 1918 try:
1915 1919 tr = self.currenttransaction()
1916 1920 hookargs = {}
1917 1921 if tr is not None:
1918 1922 hookargs.update(tr.hookargs)
1919 1923 hookargs['namespace'] = namespace
1920 1924 hookargs['key'] = key
1921 1925 hookargs['old'] = old
1922 1926 hookargs['new'] = new
1923 1927 self.hook('prepushkey', throw=True, **hookargs)
1924 1928 except error.HookAbort as exc:
1925 1929 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1926 1930 if exc.hint:
1927 1931 self.ui.write_err(_("(%s)\n") % exc.hint)
1928 1932 return False
1929 1933 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1930 1934 ret = pushkey.push(self, namespace, key, old, new)
1931 1935 def runhook():
1932 1936 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1933 1937 ret=ret)
1934 1938 self._afterlock(runhook)
1935 1939 return ret
1936 1940
1937 1941 def listkeys(self, namespace):
1938 1942 self.hook('prelistkeys', throw=True, namespace=namespace)
1939 1943 self.ui.debug('listing keys for "%s"\n' % namespace)
1940 1944 values = pushkey.list(self, namespace)
1941 1945 self.hook('listkeys', namespace=namespace, values=values)
1942 1946 return values
1943 1947
1944 1948 def debugwireargs(self, one, two, three=None, four=None, five=None):
1945 1949 '''used to test argument passing over the wire'''
1946 1950 return "%s %s %s %s %s" % (one, two, three, four, five)
1947 1951
1948 1952 def savecommitmessage(self, text):
1949 1953 fp = self.vfs('last-message.txt', 'wb')
1950 1954 try:
1951 1955 fp.write(text)
1952 1956 finally:
1953 1957 fp.close()
1954 1958 return self.pathto(fp.name[len(self.root) + 1:])
1955 1959
1956 1960 # used to avoid circular references so destructors work
1957 1961 def aftertrans(files):
1958 1962 renamefiles = [tuple(t) for t in files]
1959 1963 def a():
1960 1964 for vfs, src, dest in renamefiles:
1961 1965 try:
1962 1966 vfs.rename(src, dest)
1963 1967 except OSError: # journal file does not yet exist
1964 1968 pass
1965 1969 return a
1966 1970
1967 1971 def undoname(fn):
1968 1972 base, name = os.path.split(fn)
1969 1973 assert name.startswith('journal')
1970 1974 return os.path.join(base, name.replace('journal', 'undo', 1))
1971 1975
1972 1976 def instance(ui, path, create):
1973 1977 return localrepository(ui, util.urllocalpath(path), create)
1974 1978
1975 1979 def islocal(path):
1976 1980 return True
1977 1981
1978 1982 def newreporequirements(repo):
1979 1983 """Determine the set of requirements for a new local repository.
1980 1984
1981 1985 Extensions can wrap this function to specify custom requirements for
1982 1986 new repositories.
1983 1987 """
1984 1988 ui = repo.ui
1985 1989 requirements = set(['revlogv1'])
1986 1990 if ui.configbool('format', 'usestore', True):
1987 1991 requirements.add('store')
1988 1992 if ui.configbool('format', 'usefncache', True):
1989 1993 requirements.add('fncache')
1990 1994 if ui.configbool('format', 'dotencode', True):
1991 1995 requirements.add('dotencode')
1992 1996
1993 1997 if scmutil.gdinitconfig(ui):
1994 1998 requirements.add('generaldelta')
1995 1999 if ui.configbool('experimental', 'treemanifest', False):
1996 2000 requirements.add('treemanifest')
1997 2001 if ui.configbool('experimental', 'manifestv2', False):
1998 2002 requirements.add('manifestv2')
1999 2003
2000 2004 return requirements
@@ -1,1611 +1,1622 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import array
11 11 import heapq
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 error,
18 18 mdiff,
19 19 parsers,
20 20 revlog,
21 21 util,
22 22 )
23 23
24 24 propertycache = util.propertycache
25 25
26 26 def _parsev1(data):
27 27 # This method does a little bit of excessive-looking
28 28 # precondition checking. This is so that the behavior of this
29 29 # class exactly matches its C counterpart to try and help
30 30 # prevent surprise breakage for anyone that develops against
31 31 # the pure version.
32 32 if data and data[-1] != '\n':
33 33 raise ValueError('Manifest did not end in a newline.')
34 34 prev = None
35 35 for l in data.splitlines():
36 36 if prev is not None and prev > l:
37 37 raise ValueError('Manifest lines not in sorted order.')
38 38 prev = l
39 39 f, n = l.split('\0')
40 40 if len(n) > 40:
41 41 yield f, revlog.bin(n[:40]), n[40:]
42 42 else:
43 43 yield f, revlog.bin(n), ''
44 44
45 45 def _parsev2(data):
46 46 metadataend = data.find('\n')
47 47 # Just ignore metadata for now
48 48 pos = metadataend + 1
49 49 prevf = ''
50 50 while pos < len(data):
51 51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 52 if end == -1:
53 53 raise ValueError('Manifest ended with incomplete file entry.')
54 54 stemlen = ord(data[pos])
55 55 items = data[pos + 1:end].split('\0')
56 56 f = prevf[:stemlen] + items[0]
57 57 if prevf > f:
58 58 raise ValueError('Manifest entries not in sorted order.')
59 59 fl = items[1]
60 60 # Just ignore metadata (items[2:] for now)
61 61 n = data[end + 1:end + 21]
62 62 yield f, n, fl
63 63 pos = end + 22
64 64 prevf = f
65 65
66 66 def _parse(data):
67 67 """Generates (path, node, flags) tuples from a manifest text"""
68 68 if data.startswith('\0'):
69 69 return iter(_parsev2(data))
70 70 else:
71 71 return iter(_parsev1(data))
72 72
73 73 def _text(it, usemanifestv2):
74 74 """Given an iterator over (path, node, flags) tuples, returns a manifest
75 75 text"""
76 76 if usemanifestv2:
77 77 return _textv2(it)
78 78 else:
79 79 return _textv1(it)
80 80
81 81 def _textv1(it):
82 82 files = []
83 83 lines = []
84 84 _hex = revlog.hex
85 85 for f, n, fl in it:
86 86 files.append(f)
87 87 # if this is changed to support newlines in filenames,
88 88 # be sure to check the templates/ dir again (especially *-raw.tmpl)
89 89 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
90 90
91 91 _checkforbidden(files)
92 92 return ''.join(lines)
93 93
94 94 def _textv2(it):
95 95 files = []
96 96 lines = ['\0\n']
97 97 prevf = ''
98 98 for f, n, fl in it:
99 99 files.append(f)
100 100 stem = os.path.commonprefix([prevf, f])
101 101 stemlen = min(len(stem), 255)
102 102 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
103 103 prevf = f
104 104 _checkforbidden(files)
105 105 return ''.join(lines)
106 106
107 107 class lazymanifestiter(object):
108 108 def __init__(self, lm):
109 109 self.pos = 0
110 110 self.lm = lm
111 111
112 112 def __iter__(self):
113 113 return self
114 114
115 115 def next(self):
116 116 try:
117 117 data, pos = self.lm._get(self.pos)
118 118 except IndexError:
119 119 raise StopIteration
120 120 if pos == -1:
121 121 self.pos += 1
122 122 return data[0]
123 123 self.pos += 1
124 124 zeropos = data.find('\x00', pos)
125 125 return data[pos:zeropos]
126 126
127 127 class lazymanifestiterentries(object):
128 128 def __init__(self, lm):
129 129 self.lm = lm
130 130 self.pos = 0
131 131
132 132 def __iter__(self):
133 133 return self
134 134
135 135 def next(self):
136 136 try:
137 137 data, pos = self.lm._get(self.pos)
138 138 except IndexError:
139 139 raise StopIteration
140 140 if pos == -1:
141 141 self.pos += 1
142 142 return data
143 143 zeropos = data.find('\x00', pos)
144 144 hashval = unhexlify(data, self.lm.extrainfo[self.pos],
145 145 zeropos + 1, 40)
146 146 flags = self.lm._getflags(data, self.pos, zeropos)
147 147 self.pos += 1
148 148 return (data[pos:zeropos], hashval, flags)
149 149
150 150 def unhexlify(data, extra, pos, length):
151 151 s = data[pos:pos + length].decode('hex')
152 152 if extra:
153 153 s += chr(extra & 0xff)
154 154 return s
155 155
156 156 def _cmp(a, b):
157 157 return (a > b) - (a < b)
158 158
159 159 class _lazymanifest(object):
160 160 def __init__(self, data, positions=None, extrainfo=None, extradata=None):
161 161 if positions is None:
162 162 self.positions = self.findlines(data)
163 163 self.extrainfo = [0] * len(self.positions)
164 164 self.data = data
165 165 self.extradata = []
166 166 else:
167 167 self.positions = positions[:]
168 168 self.extrainfo = extrainfo[:]
169 169 self.extradata = extradata[:]
170 170 self.data = data
171 171
172 172 def findlines(self, data):
173 173 if not data:
174 174 return []
175 175 pos = data.find("\n")
176 176 if pos == -1 or data[-1] != '\n':
177 177 raise ValueError("Manifest did not end in a newline.")
178 178 positions = [0]
179 179 prev = data[:data.find('\x00')]
180 180 while pos < len(data) - 1 and pos != -1:
181 181 positions.append(pos + 1)
182 182 nexts = data[pos + 1:data.find('\x00', pos + 1)]
183 183 if nexts < prev:
184 184 raise ValueError("Manifest lines not in sorted order.")
185 185 prev = nexts
186 186 pos = data.find("\n", pos + 1)
187 187 return positions
188 188
189 189 def _get(self, index):
190 190 # get the position encoded in pos:
191 191 # positive number is an index in 'data'
192 192 # negative number is in extrapieces
193 193 pos = self.positions[index]
194 194 if pos >= 0:
195 195 return self.data, pos
196 196 return self.extradata[-pos - 1], -1
197 197
198 198 def _getkey(self, pos):
199 199 if pos >= 0:
200 200 return self.data[pos:self.data.find('\x00', pos + 1)]
201 201 return self.extradata[-pos - 1][0]
202 202
203 203 def bsearch(self, key):
204 204 first = 0
205 205 last = len(self.positions) - 1
206 206
207 207 while first <= last:
208 208 midpoint = (first + last)//2
209 209 nextpos = self.positions[midpoint]
210 210 candidate = self._getkey(nextpos)
211 211 r = _cmp(key, candidate)
212 212 if r == 0:
213 213 return midpoint
214 214 else:
215 215 if r < 0:
216 216 last = midpoint - 1
217 217 else:
218 218 first = midpoint + 1
219 219 return -1
220 220
221 221 def bsearch2(self, key):
222 222 # same as the above, but will always return the position
223 223 # done for performance reasons
224 224 first = 0
225 225 last = len(self.positions) - 1
226 226
227 227 while first <= last:
228 228 midpoint = (first + last)//2
229 229 nextpos = self.positions[midpoint]
230 230 candidate = self._getkey(nextpos)
231 231 r = _cmp(key, candidate)
232 232 if r == 0:
233 233 return (midpoint, True)
234 234 else:
235 235 if r < 0:
236 236 last = midpoint - 1
237 237 else:
238 238 first = midpoint + 1
239 239 return (first, False)
240 240
241 241 def __contains__(self, key):
242 242 return self.bsearch(key) != -1
243 243
244 244 def _getflags(self, data, needle, pos):
245 245 start = pos + 41
246 246 end = data.find("\n", start)
247 247 if end == -1:
248 248 end = len(data) - 1
249 249 if start == end:
250 250 return ''
251 251 return self.data[start:end]
252 252
253 253 def __getitem__(self, key):
254 254 if not isinstance(key, str):
255 255 raise TypeError("getitem: manifest keys must be a string.")
256 256 needle = self.bsearch(key)
257 257 if needle == -1:
258 258 raise KeyError
259 259 data, pos = self._get(needle)
260 260 if pos == -1:
261 261 return (data[1], data[2])
262 262 zeropos = data.find('\x00', pos)
263 263 assert 0 <= needle <= len(self.positions)
264 264 assert len(self.extrainfo) == len(self.positions)
265 265 hashval = unhexlify(data, self.extrainfo[needle], zeropos + 1, 40)
266 266 flags = self._getflags(data, needle, zeropos)
267 267 return (hashval, flags)
268 268
269 269 def __delitem__(self, key):
270 270 needle, found = self.bsearch2(key)
271 271 if not found:
272 272 raise KeyError
273 273 cur = self.positions[needle]
274 274 self.positions = self.positions[:needle] + self.positions[needle + 1:]
275 275 self.extrainfo = self.extrainfo[:needle] + self.extrainfo[needle + 1:]
276 276 if cur >= 0:
277 277 self.data = self.data[:cur] + '\x00' + self.data[cur + 1:]
278 278
279 279 def __setitem__(self, key, value):
280 280 if not isinstance(key, str):
281 281 raise TypeError("setitem: manifest keys must be a string.")
282 282 if not isinstance(value, tuple) or len(value) != 2:
283 283 raise TypeError("Manifest values must be a tuple of (node, flags).")
284 284 hashval = value[0]
285 285 if not isinstance(hashval, str) or not 20 <= len(hashval) <= 22:
286 286 raise TypeError("node must be a 20-byte string")
287 287 flags = value[1]
288 288 if len(hashval) == 22:
289 289 hashval = hashval[:-1]
290 290 if not isinstance(flags, str) or len(flags) > 1:
291 291 raise TypeError("flags must a 0 or 1 byte string, got %r", flags)
292 292 needle, found = self.bsearch2(key)
293 293 if found:
294 294 # put the item
295 295 pos = self.positions[needle]
296 296 if pos < 0:
297 297 self.extradata[-pos - 1] = (key, hashval, value[1])
298 298 else:
299 299 # just don't bother
300 300 self.extradata.append((key, hashval, value[1]))
301 301 self.positions[needle] = -len(self.extradata)
302 302 else:
303 303 # not found, put it in with extra positions
304 304 self.extradata.append((key, hashval, value[1]))
305 305 self.positions = (self.positions[:needle] + [-len(self.extradata)]
306 306 + self.positions[needle:])
307 307 self.extrainfo = (self.extrainfo[:needle] + [0] +
308 308 self.extrainfo[needle:])
309 309
310 310 def copy(self):
311 311 # XXX call _compact like in C?
312 312 return _lazymanifest(self.data, self.positions, self.extrainfo,
313 313 self.extradata)
314 314
315 315 def _compact(self):
316 316 # hopefully not called TOO often
317 317 if len(self.extradata) == 0:
318 318 return
319 319 l = []
320 320 last_cut = 0
321 321 i = 0
322 322 offset = 0
323 323 self.extrainfo = [0] * len(self.positions)
324 324 while i < len(self.positions):
325 325 if self.positions[i] >= 0:
326 326 cur = self.positions[i]
327 327 last_cut = cur
328 328 while True:
329 329 self.positions[i] = offset
330 330 i += 1
331 331 if i == len(self.positions) or self.positions[i] < 0:
332 332 break
333 333 offset += self.positions[i] - cur
334 334 cur = self.positions[i]
335 335 end_cut = self.data.find('\n', cur)
336 336 if end_cut != -1:
337 337 end_cut += 1
338 338 offset += end_cut - cur
339 339 l.append(self.data[last_cut:end_cut])
340 340 else:
341 341 while i < len(self.positions) and self.positions[i] < 0:
342 342 cur = self.positions[i]
343 343 t = self.extradata[-cur - 1]
344 344 l.append(self._pack(t))
345 345 self.positions[i] = offset
346 346 if len(t[1]) > 20:
347 347 self.extrainfo[i] = ord(t[1][21])
348 348 offset += len(l[-1])
349 349 i += 1
350 350 self.data = ''.join(l)
351 351 self.extradata = []
352 352
353 353 def _pack(self, d):
354 354 return d[0] + '\x00' + d[1][:20].encode('hex') + d[2] + '\n'
355 355
356 356 def text(self):
357 357 self._compact()
358 358 return self.data
359 359
360 360 def diff(self, m2, clean=False):
361 361 '''Finds changes between the current manifest and m2.'''
362 362 # XXX think whether efficiency matters here
363 363 diff = {}
364 364
365 365 for fn, e1, flags in self.iterentries():
366 366 if fn not in m2:
367 367 diff[fn] = (e1, flags), (None, '')
368 368 else:
369 369 e2 = m2[fn]
370 370 if (e1, flags) != e2:
371 371 diff[fn] = (e1, flags), e2
372 372 elif clean:
373 373 diff[fn] = None
374 374
375 375 for fn, e2, flags in m2.iterentries():
376 376 if fn not in self:
377 377 diff[fn] = (None, ''), (e2, flags)
378 378
379 379 return diff
380 380
381 381 def iterentries(self):
382 382 return lazymanifestiterentries(self)
383 383
384 384 def iterkeys(self):
385 385 return lazymanifestiter(self)
386 386
387 387 def __iter__(self):
388 388 return lazymanifestiter(self)
389 389
390 390 def __len__(self):
391 391 return len(self.positions)
392 392
393 393 def filtercopy(self, filterfn):
394 394 # XXX should be optimized
395 395 c = _lazymanifest('')
396 396 for f, n, fl in self.iterentries():
397 397 if filterfn(f):
398 398 c[f] = n, fl
399 399 return c
400 400
401 401 try:
402 402 _lazymanifest = parsers.lazymanifest
403 403 except AttributeError:
404 404 pass
405 405
406 406 class manifestdict(object):
407 407 def __init__(self, data=''):
408 408 if data.startswith('\0'):
409 409 #_lazymanifest can not parse v2
410 410 self._lm = _lazymanifest('')
411 411 for f, n, fl in _parsev2(data):
412 412 self._lm[f] = n, fl
413 413 else:
414 414 self._lm = _lazymanifest(data)
415 415
416 416 def __getitem__(self, key):
417 417 return self._lm[key][0]
418 418
419 419 def find(self, key):
420 420 return self._lm[key]
421 421
422 422 def __len__(self):
423 423 return len(self._lm)
424 424
425 425 def __nonzero__(self):
426 426 # nonzero is covered by the __len__ function, but implementing it here
427 427 # makes it easier for extensions to override.
428 428 return len(self._lm) != 0
429 429
430 430 def __setitem__(self, key, node):
431 431 self._lm[key] = node, self.flags(key, '')
432 432
433 433 def __contains__(self, key):
434 434 return key in self._lm
435 435
436 436 def __delitem__(self, key):
437 437 del self._lm[key]
438 438
439 439 def __iter__(self):
440 440 return self._lm.__iter__()
441 441
442 442 def iterkeys(self):
443 443 return self._lm.iterkeys()
444 444
445 445 def keys(self):
446 446 return list(self.iterkeys())
447 447
448 448 def filesnotin(self, m2):
449 449 '''Set of files in this manifest that are not in the other'''
450 450 diff = self.diff(m2)
451 451 files = set(filepath
452 452 for filepath, hashflags in diff.iteritems()
453 453 if hashflags[1][0] is None)
454 454 return files
455 455
456 456 @propertycache
457 457 def _dirs(self):
458 458 return util.dirs(self)
459 459
460 460 def dirs(self):
461 461 return self._dirs
462 462
463 463 def hasdir(self, dir):
464 464 return dir in self._dirs
465 465
466 466 def _filesfastpath(self, match):
467 467 '''Checks whether we can correctly and quickly iterate over matcher
468 468 files instead of over manifest files.'''
469 469 files = match.files()
470 470 return (len(files) < 100 and (match.isexact() or
471 471 (match.prefix() and all(fn in self for fn in files))))
472 472
473 473 def walk(self, match):
474 474 '''Generates matching file names.
475 475
476 476 Equivalent to manifest.matches(match).iterkeys(), but without creating
477 477 an entirely new manifest.
478 478
479 479 It also reports nonexistent files by marking them bad with match.bad().
480 480 '''
481 481 if match.always():
482 482 for f in iter(self):
483 483 yield f
484 484 return
485 485
486 486 fset = set(match.files())
487 487
488 488 # avoid the entire walk if we're only looking for specific files
489 489 if self._filesfastpath(match):
490 490 for fn in sorted(fset):
491 491 yield fn
492 492 return
493 493
494 494 for fn in self:
495 495 if fn in fset:
496 496 # specified pattern is the exact name
497 497 fset.remove(fn)
498 498 if match(fn):
499 499 yield fn
500 500
501 501 # for dirstate.walk, files=['.'] means "walk the whole tree".
502 502 # follow that here, too
503 503 fset.discard('.')
504 504
505 505 for fn in sorted(fset):
506 506 if not self.hasdir(fn):
507 507 match.bad(fn, None)
508 508
509 509 def matches(self, match):
510 510 '''generate a new manifest filtered by the match argument'''
511 511 if match.always():
512 512 return self.copy()
513 513
514 514 if self._filesfastpath(match):
515 515 m = manifestdict()
516 516 lm = self._lm
517 517 for fn in match.files():
518 518 if fn in lm:
519 519 m._lm[fn] = lm[fn]
520 520 return m
521 521
522 522 m = manifestdict()
523 523 m._lm = self._lm.filtercopy(match)
524 524 return m
525 525
526 526 def diff(self, m2, clean=False):
527 527 '''Finds changes between the current manifest and m2.
528 528
529 529 Args:
530 530 m2: the manifest to which this manifest should be compared.
531 531 clean: if true, include files unchanged between these manifests
532 532 with a None value in the returned dictionary.
533 533
534 534 The result is returned as a dict with filename as key and
535 535 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
536 536 nodeid in the current/other manifest and fl1/fl2 is the flag
537 537 in the current/other manifest. Where the file does not exist,
538 538 the nodeid will be None and the flags will be the empty
539 539 string.
540 540 '''
541 541 return self._lm.diff(m2._lm, clean)
542 542
543 543 def setflag(self, key, flag):
544 544 self._lm[key] = self[key], flag
545 545
546 546 def get(self, key, default=None):
547 547 try:
548 548 return self._lm[key][0]
549 549 except KeyError:
550 550 return default
551 551
552 552 def flags(self, key, default=''):
553 553 try:
554 554 return self._lm[key][1]
555 555 except KeyError:
556 556 return default
557 557
558 558 def copy(self):
559 559 c = manifestdict()
560 560 c._lm = self._lm.copy()
561 561 return c
562 562
563 563 def iteritems(self):
564 564 return (x[:2] for x in self._lm.iterentries())
565 565
566 566 def iterentries(self):
567 567 return self._lm.iterentries()
568 568
569 569 def text(self, usemanifestv2=False):
570 570 if usemanifestv2:
571 571 return _textv2(self._lm.iterentries())
572 572 else:
573 573 # use (probably) native version for v1
574 574 return self._lm.text()
575 575
576 576 def fastdelta(self, base, changes):
577 577 """Given a base manifest text as an array.array and a list of changes
578 578 relative to that text, compute a delta that can be used by revlog.
579 579 """
580 580 delta = []
581 581 dstart = None
582 582 dend = None
583 583 dline = [""]
584 584 start = 0
585 585 # zero copy representation of base as a buffer
586 586 addbuf = util.buffer(base)
587 587
588 588 changes = list(changes)
589 589 if len(changes) < 1000:
590 590 # start with a readonly loop that finds the offset of
591 591 # each line and creates the deltas
592 592 for f, todelete in changes:
593 593 # bs will either be the index of the item or the insert point
594 594 start, end = _msearch(addbuf, f, start)
595 595 if not todelete:
596 596 h, fl = self._lm[f]
597 597 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
598 598 else:
599 599 if start == end:
600 600 # item we want to delete was not found, error out
601 601 raise AssertionError(
602 602 _("failed to remove %s from manifest") % f)
603 603 l = ""
604 604 if dstart is not None and dstart <= start and dend >= start:
605 605 if dend < end:
606 606 dend = end
607 607 if l:
608 608 dline.append(l)
609 609 else:
610 610 if dstart is not None:
611 611 delta.append([dstart, dend, "".join(dline)])
612 612 dstart = start
613 613 dend = end
614 614 dline = [l]
615 615
616 616 if dstart is not None:
617 617 delta.append([dstart, dend, "".join(dline)])
618 618 # apply the delta to the base, and get a delta for addrevision
619 619 deltatext, arraytext = _addlistdelta(base, delta)
620 620 else:
621 621 # For large changes, it's much cheaper to just build the text and
622 622 # diff it.
623 623 arraytext = array.array('c', self.text())
624 624 deltatext = mdiff.textdiff(base, arraytext)
625 625
626 626 return arraytext, deltatext
627 627
628 628 def _msearch(m, s, lo=0, hi=None):
629 629 '''return a tuple (start, end) that says where to find s within m.
630 630
631 631 If the string is found m[start:end] are the line containing
632 632 that string. If start == end the string was not found and
633 633 they indicate the proper sorted insertion point.
634 634
635 635 m should be a buffer or a string
636 636 s is a string'''
637 637 def advance(i, c):
638 638 while i < lenm and m[i] != c:
639 639 i += 1
640 640 return i
641 641 if not s:
642 642 return (lo, lo)
643 643 lenm = len(m)
644 644 if not hi:
645 645 hi = lenm
646 646 while lo < hi:
647 647 mid = (lo + hi) // 2
648 648 start = mid
649 649 while start > 0 and m[start - 1] != '\n':
650 650 start -= 1
651 651 end = advance(start, '\0')
652 652 if m[start:end] < s:
653 653 # we know that after the null there are 40 bytes of sha1
654 654 # this translates to the bisect lo = mid + 1
655 655 lo = advance(end + 40, '\n') + 1
656 656 else:
657 657 # this translates to the bisect hi = mid
658 658 hi = start
659 659 end = advance(lo, '\0')
660 660 found = m[lo:end]
661 661 if s == found:
662 662 # we know that after the null there are 40 bytes of sha1
663 663 end = advance(end + 40, '\n')
664 664 return (lo, end + 1)
665 665 else:
666 666 return (lo, lo)
667 667
668 668 def _checkforbidden(l):
669 669 """Check filenames for illegal characters."""
670 670 for f in l:
671 671 if '\n' in f or '\r' in f:
672 672 raise error.RevlogError(
673 673 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
674 674
675 675
676 676 # apply the changes collected during the bisect loop to our addlist
677 677 # return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    """Apply the (start, end, content) edits in x to the addlist array.

    Returns (deltatext, newaddlist): the edits encoded in the delta format
    addrevision expects, and the patched copy of addlist.
    """
    # Building a fresh array is cheaper than repeatedly splicing the
    # existing one in place when addlist is large.
    newaddlist = array.array('c')
    pos = 0
    for start, end, content in x:
        newaddlist += addlist[pos:start]
        if content:
            newaddlist += array.array('c', content)
        pos = end
    newaddlist += addlist[pos:]

    # Encode each hunk as a (start, end, length) big-endian header followed
    # by the replacement bytes.
    chunks = []
    for start, end, content in x:
        chunks.append(struct.pack(">lll", start, end, len(content)) + content)
    return "".join(chunks), newaddlist
696 696
def _splittopdir(f):
    """Split path f into (topdir, rest).

    topdir keeps its trailing '/', or is '' when f has no directory
    component: 'a/b/c' -> ('a/', 'b/c'), 'file' -> ('', 'file').
    """
    head, sep, rest = f.partition('/')
    if not sep:
        return '', f
    return head + '/', rest
703 703
704 704 _noop = lambda s: None
705 705
class treemanifest(object):
    """A manifest laid out as a tree with one instance per directory.

    Direct children are split between _files (filename -> node id, with
    flags kept separately in _flags) and _dirs ('subdir/' -> child
    treemanifest). Content may be populated lazily through _loadfunc or
    _copyfunc; both are reset to _noop once they have run. _dirty tracks
    whether this node's content diverges from the revision named _node.
    """
    def __init__(self, dir='', text=''):
        self._dir = dir
        self._node = revlog.nullid
        self._loadfunc = _noop
        self._copyfunc = _noop
        self._dirty = False
        self._dirs = {}
        # Using _lazymanifest here is a little slower than plain old dicts
        self._files = {}
        self._flags = {}
        if text:
            def readsubtree(subdir, subm):
                raise AssertionError('treemanifest constructor only accepts '
                                     'flat manifests')
            self.parse(text, readsubtree)
            self._dirty = True # Mark flat manifest dirty after parsing

    def _subpath(self, path):
        # Path of 'path' relative to the repository root.
        return self._dir + path

    def __len__(self):
        # Total number of files, recursing into subdirectories.
        self._load()
        size = len(self._files)
        for m in self._dirs.values():
            size += m.__len__()
        return size

    def _isempty(self):
        self._load() # for consistency; already loaded by all callers
        return (not self._files and (not self._dirs or
                all(m._isempty() for m in self._dirs.values())))

    def __repr__(self):
        return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
                (self._dir, revlog.hex(self._node),
                 bool(self._loadfunc is _noop),
                 self._dirty, id(self)))

    def dir(self):
        '''The directory that this tree manifest represents, including a
        trailing '/'. Empty string for the repo root directory.'''
        return self._dir

    def node(self):
        '''This node of this instance. nullid for unsaved instances. Should
        be updated when the instance is read or written from a revlog.
        '''
        # A dirty tree has no valid node yet; it must be written first.
        assert not self._dirty
        return self._node

    def setnode(self, node):
        self._node = node
        self._dirty = False

    def iterentries(self):
        # Yield (path, node, flags) triples in sorted path order, recursing
        # into subdirectories.
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n, self._flags.get(p, '')
            else:
                for x in n.iterentries():
                    yield x

    def iteritems(self):
        # Yield (path, node) pairs in sorted path order, recursing into
        # subdirectories.
        self._load()
        for p, n in sorted(self._dirs.items() + self._files.items()):
            if p in self._files:
                yield self._subpath(p), n
            else:
                for f, sn in n.iteritems():
                    yield f, sn

    def iterkeys(self):
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                yield self._subpath(p)
            else:
                for f in self._dirs[p].iterkeys():
                    yield f

    def keys(self):
        return list(self.iterkeys())

    def __iter__(self):
        return self.iterkeys()

    def __contains__(self, f):
        if f is None:
            return False
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return False
            return self._dirs[dir].__contains__(subpath)
        else:
            return f in self._files

    def get(self, f, default=None):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return default
            return self._dirs[dir].get(subpath, default)
        else:
            return self._files.get(f, default)

    def __getitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].__getitem__(subpath)
        else:
            return self._files[f]

    def flags(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                return ''
            return self._dirs[dir].flags(subpath)
        else:
            # Directories themselves never carry flags.
            if f in self._dirs:
                return ''
            return self._flags.get(f, '')

    def find(self, f):
        # Return the (node, flags) pair for file f; raises KeyError if
        # f is absent.
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            return self._dirs[dir].find(subpath)
        else:
            return self._files[f], self._flags.get(f, '')

    def __delitem__(self, f):
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            self._dirs[dir].__delitem__(subpath)
            # If the directory is now empty, remove it
            if self._dirs[dir]._isempty():
                del self._dirs[dir]
        else:
            del self._files[f]
            if f in self._flags:
                del self._flags[f]
        self._dirty = True

    def __setitem__(self, f, n):
        assert n is not None
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            # Intermediate directories are created on demand.
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].__setitem__(subpath, n)
        else:
            self._files[f] = n[:21] # to match manifestdict's behavior
        self._dirty = True

    def _load(self):
        # Run at most one pending lazy callback; a pending load takes
        # precedence over a pending copy.
        if self._loadfunc is not _noop:
            lf, self._loadfunc = self._loadfunc, _noop
            lf(self)
        elif self._copyfunc is not _noop:
            cf, self._copyfunc = self._copyfunc, _noop
            cf(self)

    def setflag(self, f, flags):
        """Set the flags (symlink, executable) for path f."""
        self._load()
        dir, subpath = _splittopdir(f)
        if dir:
            if dir not in self._dirs:
                self._dirs[dir] = treemanifest(self._subpath(dir))
            self._dirs[dir].setflag(subpath, flags)
        else:
            self._flags[f] = flags
        self._dirty = True

    def copy(self):
        # Copies are lazy: if this instance is not loaded yet, the copy
        # defers its own population until first use.
        copy = treemanifest(self._dir)
        copy._node = self._node
        copy._dirty = self._dirty
        if self._copyfunc is _noop:
            def _copyfunc(s):
                self._load()
                for d in self._dirs:
                    s._dirs[d] = self._dirs[d].copy()
                s._files = dict.copy(self._files)
                s._flags = dict.copy(self._flags)
            if self._loadfunc is _noop:
                # Already loaded: copy eagerly.
                _copyfunc(copy)
            else:
                copy._copyfunc = _copyfunc
        else:
            copy._copyfunc = self._copyfunc
        return copy

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        files = set()
        def _filesnotin(t1, t2):
            # Subtrees with matching, unmodified nodes cannot differ.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                if d in t2._dirs:
                    m2 = t2._dirs[d]
                    _filesnotin(m1, m2)
                else:
                    files.update(m1.iterkeys())

            for fn in t1._files.iterkeys():
                if fn not in t2._files:
                    files.add(t1._subpath(fn))

        _filesnotin(self, m2)
        return files

    @propertycache
    def _alldirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._alldirs

    def hasdir(self, dir):
        self._load()
        topdir, subdir = _splittopdir(dir)
        if topdir:
            if topdir in self._dirs:
                return self._dirs[topdir].hasdir(subdir)
            return False
        return (dir + '/') in self._dirs

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        for fn in self._walk(match):
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # Anything left in fset matched nothing; report it unless it names
        # an existing directory.
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def _walk(self, match):
        '''Recursively generates matching file names for walk().'''
        if not match.visitdir(self._dir[:-1] or '.'):
            return

        # yield this dir's files and walk its submanifests
        self._load()
        for p in sorted(self._dirs.keys() + self._files.keys()):
            if p in self._files:
                fullp = self._subpath(p)
                if match(fullp):
                    yield fullp
            else:
                for f in self._dirs[p]._walk(match):
                    yield f

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        return self._matches(match)

    def _matches(self, match):
        '''recursively generate a new manifest filtered by the match argument.
        '''

        visit = match.visitdir(self._dir[:-1] or '.')
        if visit == 'all':
            return self.copy()
        ret = treemanifest(self._dir)
        if not visit:
            return ret

        self._load()
        for fn in self._files:
            fullp = self._subpath(fn)
            if not match(fullp):
                continue
            ret._files[fn] = self._files[fn]
            if fn in self._flags:
                ret._flags[fn] = self._flags[fn]

        for dir, subm in self._dirs.iteritems():
            m = subm._matches(match)
            if not m._isempty():
                ret._dirs[dir] = m

        if not ret._isempty():
            ret._dirty = True
        return ret

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # Subtrees with matching, unmodified nodes cannot differ.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result

    def unmodifiedsince(self, m2):
        return not self._dirty and not m2._dirty and self._node == m2._node

    def parse(self, text, readsubtree):
        for f, n, fl in _parse(text):
            if fl == 't':
                f = f + '/'
                self._dirs[f] = readsubtree(self._subpath(f), n)
            elif '/' in f:
                # This is a flat manifest, so use __setitem__ and setflag rather
                # than assigning directly to _files and _flags, so we can
                # assign a path in a subdirectory, and to mark dirty (compared
                # to nullid).
                self[f] = n
                if fl:
                    self.setflag(f, fl)
            else:
                # Assigning to _files and _flags avoids marking as dirty,
                # and should be a little faster.
                self._files[f] = n
                if fl:
                    self._flags[f] = fl

    def text(self, usemanifestv2=False):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        return _text(self.iterentries(), usemanifestv2)

    def dirtext(self, usemanifestv2=False):
        """Get the full data of this directory as a bytestring. Make sure that
        any submanifests have been written first, so their nodeids are correct.
        """
        self._load()
        flags = self.flags
        dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
        files = [(f, self._files[f], flags(f)) for f in self._files]
        return _text(sorted(dirs + files), usemanifestv2)

    def read(self, gettext, readsubtree):
        # Defer parsing until the manifest is actually accessed.
        def _load_for_read(s):
            s.parse(gettext(), readsubtree)
            s._dirty = False
        self._loadfunc = _load_for_read

    def writesubtrees(self, m1, m2, writesubtree):
        self._load() # for consistency; should never have any effect here
        m1._load()
        m2._load()
        emptytree = treemanifest()
        for d, subm in self._dirs.iteritems():
            subp1 = m1._dirs.get(d, emptytree)._node
            subp2 = m2._dirs.get(d, emptytree)._node
            # Prefer a non-null first parent for the subtree revision.
            if subp1 == revlog.nullid:
                subp1, subp2 = subp2, subp1
            writesubtree(subm, subp1, subp2)
1130 1130
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, dir='', dirlogcache=None):
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)

        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2

        self._fulltextcache = util.lrucachedict(cachesize)

        # Non-root tree manifest logs live under meta/<dir>/00manifest.i.
        indexfile = "00manifest.i"
        if dir:
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

        super(manifestrevlog, self).__init__(opener, indexfile,
                                             checkambig=bool(dir))

    @property
    def fulltextcache(self):
        return self._fulltextcache

    def clearcaches(self):
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
        self._dirlogcache = {'': self}

    def dirlog(self, dir):
        # Return the manifest revlog for subdirectory 'dir' ('' is self),
        # creating and caching it on first use.
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifestrevlog(self.opener, dir,
                                                    self._dirlogcache)
        return self._dirlogcache[dir]

    def add(self, m, transaction, link, p1, p2, added, removed):
        # Store manifest m as a new revision with parents p1/p2 and linkrev
        # 'link'; added/removed are the file lists changed relative to p1
        # and enable the fastdelta path below. Returns the new node id.
        if (p1 in self.fulltextcache and util.safehasattr(m, 'fastdelta')
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                # NOTE(review): read() is not defined on this class; in
                # practice it is supplied by the 'manifest' subclass below.
                m1 = self.read(p1)
                m2 = self.read(p2)
                n = self._addtree(m, transaction, link, m1, m2)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        if arraytext is not None:
            self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2):
        # Write tree manifest m (and any changed subtrees) given its parent
        # trees m1/m2; returns the root node id.
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # Double-check whether contents are unchanged to one parent
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n
1243 1243
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, repo):
        self._repo = repo

        usetreemanifest = False

        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        self._treeinmem = usetreemanifest

        # The storage layer is still the legacy 'manifest' revlog instance.
        self._oldmanifest = repo._constructmanifest()
        self._revlog = self._oldmanifest

        # A cache of the manifestctx or treemanifestctx for each directory
        self._dirmancache = {}

        # We'll separate this into it's own cache once oldmanifest is no longer
        # used
        self._mancache = self._oldmanifest._mancache
        self._dirmancache[''] = self._mancache

        # A future patch makes this use the same config value as the existing
        # mancache
        self.cachesize = 4

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        return self.get('', node)

    def get(self, dir, node):
        """Retrieves the manifest instance for the given node. Throws a
        LookupError if not found.
        """
        if node in self._dirmancache.get(dir, ()):
            cachemf = self._dirmancache[dir][node]
            # The old manifest may put non-ctx manifests in the cache, so
            # skip those since they don't implement the full api.
            if (isinstance(cachemf, manifestctx) or
                isinstance(cachemf, treemanifestctx)):
                return cachemf

        if dir:
            if self._revlog._treeondisk:
                dirlog = self._revlog.dirlog(dir)
                if node not in dirlog.nodemap:
                    raise LookupError(node, dirlog.indexfile,
                                      _('no node'))
                m = treemanifestctx(self._repo, dir, node)
            else:
                raise error.Abort(
                        _("cannot ask for manifest directory '%s' in a flat "
                          "manifest") % dir)
        else:
            if node not in self._revlog.nodemap:
                raise LookupError(node, self._revlog.indexfile,
                                  _('no node'))
            if self._treeinmem:
                m = treemanifestctx(self._repo, '', node)
            else:
                m = manifestctx(self._repo, node)

        # The null manifest is never cached; an empty ctx is cheap to build.
        if node != revlog.nullid:
            mancache = self._dirmancache.get(dir)
            if not mancache:
                mancache = util.lrucachedict(self.cachesize)
                self._dirmancache[dir] = mancache
            mancache[node] = m
        return m

    def add(self, m, transaction, link, p1, p2, added, removed):
        # Thin delegation to the underlying revlog's add().
        return self._revlog.add(m, transaction, link, p1, p2, added, removed)
1325
class memmanifestctx(object):
    """An in-memory, mutable flat manifest revision not yet written to a
    revlog. Obtain one via manifestctx.new()/copy(), mutate the
    manifestdict returned by read(), then persist it with write().
    """
    def __init__(self, repo):
        self._repo = repo
        self._manifestdict = manifestdict()

    def _revlog(self):
        return self._repo.manifestlog._revlog

    def new(self):
        return memmanifestctx(self._repo)

    def copy(self):
        memmf = memmanifestctx(self._repo)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        return self._manifestdict

    def write(self, transaction, link, p1, p2, added, removed):
        # Persist the in-memory manifest; returns the new node id.
        return self._revlog().add(self._manifestdict, transaction, link, p1, p2,
                                  added, removed)
1345
class manifestctx(object):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, repo, node):
        self._repo = repo
        self._data = None

        self._node = node

        # TODO: We eventually want p1, p2, and linkrev exposed on this class,
        # but let's add it later when something needs it and we can load it
        # lazily.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        return self._repo.manifestlog._revlog

    def node(self):
        return self._node

    def new(self):
        # A fresh, empty in-memory manifest to mutate and write.
        return memmanifestctx(self._repo)

    def copy(self):
        # A mutable in-memory copy of this revision's contents.
        memmf = memmanifestctx(self._repo)
        memmf._manifestdict = self.read().copy()
        return memmf

    def read(self):
        # Parse lazily and cache; also primes the revlog's fulltext cache.
        if not self._data:
            if self._node == revlog.nullid:
                self._data = manifestdict()
            else:
                rl = self._revlog()
                text = rl.revision(self._node)
                arraytext = array.array('c', text)
                rl._fulltextcache[self._node] = arraytext
                self._data = manifestdict(text)
        return self._data

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, nothing changes since this is a flat manifest.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in rl.parentrevs(r):
            return self.readdelta()
        return self.read()

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        Changing the value of `shallow` has no effect on flat manifests.
        '''
        # NOTE(review): this local shadows the module-level 'revlog' import
        # for the remainder of the method.
        revlog = self._revlog()
        if revlog._usemanifestv2:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = manifestctx(self._repo, revlog.node(r0)).read()
            m1 = self.read()
            md = manifestdict()
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

        r = revlog.rev(self._node)
        d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
        return manifestdict(d)

    def find(self, key):
        return self.read().find(key)
1426 1430
class memtreemanifestctx(object):
    """An in-memory, mutable tree manifest revision not yet written to a
    revlog; the tree counterpart of memmanifestctx.
    """
    def __init__(self, repo, dir=''):
        self._repo = repo
        self._dir = dir
        self._treemanifest = treemanifest()

    def _revlog(self):
        return self._repo.manifestlog._revlog

    def new(self, dir=''):
        return memtreemanifestctx(self._repo, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._repo, dir=self._dir)
        memmf._treemanifest = self._treemanifest.copy()
        return memmf

    def read(self):
        return self._treemanifest

    def write(self, transaction, link, p1, p2, added, removed):
        # Persist the in-memory tree; returns the new root node id.
        return self._revlog().add(self._treemanifest, transaction, link, p1, p2,
                                  added, removed)
1454
class treemanifestctx(object):
    """A single revision of the tree manifest for directory 'dir'
    (the tree counterpart of manifestctx).
    """
    def __init__(self, repo, dir, node):
        self._repo = repo
        self._dir = dir
        self._data = None

        self._node = node

        # TODO: Load p1/p2/linkrev lazily. They need to be lazily loaded so that
        # we can instantiate treemanifestctx objects for directories we don't
        # have on disk.
        #self.p1, self.p2 = revlog.parents(node)
        #rev = revlog.rev(node)
        #self.linkrev = revlog.linkrev(rev)

    def _revlog(self):
        return self._repo.manifestlog._revlog.dirlog(self._dir)

    def read(self):
        # Parse lazily and cache the treemanifest for this revision.
        if not self._data:
            rl = self._revlog()
            if self._node == revlog.nullid:
                self._data = treemanifest()
            elif rl._treeondisk:
                # Subtrees live in their own revlogs; recurse through
                # treemanifestctx instances on demand.
                m = treemanifest(dir=self._dir)
                def gettext():
                    return rl.revision(self._node)
                def readsubtree(dir, subm):
                    return treemanifestctx(self._repo, dir, subm).read()
                m.read(gettext, readsubtree)
                m.setnode(self._node)
                self._data = m
            else:
                # Tree kept in memory only; the revlog stores a flat text.
                text = rl.revision(self._node)
                arraytext = array.array('c', text)
                rl.fulltextcache[self._node] = arraytext
                self._data = treemanifest(dir=self._dir, text=text)

        return self._data

    def node(self):
        return self._node

    def new(self, dir=''):
        return memtreemanifestctx(self._repo, dir=dir)

    def copy(self):
        memmf = memtreemanifestctx(self._repo, dir=self._dir)
        memmf._treemanifest = self.read().copy()
        return memmf

    def readdelta(self, shallow=False):
        '''Returns a manifest containing just the entries that are present
        in this manifest, but not in its p1 manifest. This is efficient to read
        if the revlog delta is already p1.

        If `shallow` is True, this will read the delta for this directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifest, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.
        '''
        # NOTE(review): the local 'revlog' shadows the module import below.
        revlog = self._revlog()
        if shallow and not revlog._usemanifestv2:
            r = revlog.rev(self._node)
            d = mdiff.patchtext(revlog.revdiff(revlog.deltaparent(r), r))
            return manifestdict(d)
        else:
            # Need to perform a slow delta
            r0 = revlog.deltaparent(revlog.rev(self._node))
            m0 = treemanifestctx(self._repo, self._dir, revlog.node(r0)).read()
            m1 = self.read()
            md = treemanifest(dir=self._dir)
            for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
                if n1:
                    md[f] = n1
                    if fl1:
                        md.setflag(f, fl1)
            return md

    def readfast(self, shallow=False):
        '''Calls either readdelta or read, based on which would be less work.
        readdelta is called if the delta is against the p1, and therefore can be
        read quickly.

        If `shallow` is True, it only returns the entries from this manifest,
        and not any submanifests.
        '''
        rl = self._revlog()
        r = rl.rev(self._node)
        deltaparent = rl.deltaparent(r)
        if (deltaparent != revlog.nullrev and
            deltaparent in rl.parentrevs(r)):
            return self.readdelta(shallow=shallow)

        if shallow:
            return manifestdict(rl.revision(self._node))
        else:
            return self.read()

    def find(self, key):
        return self.read().find(key)
1546 1557
class manifest(manifestrevlog):
    """Legacy combined manifest class: revlog storage plus an in-memory
    manifest cache. Being phased out in favor of manifestlog and the
    *manifestctx classes (see the comments below about migrating off it).
    """
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
        self._mancache = util.lrucachedict(cachesize)
        self._treeinmem = usetreemanifest
        super(manifest, self).__init__(opener, dir=dir, dirlogcache=dirlogcache)

    def _newmanifest(self, data=''):
        # Build the in-memory representation matching the configured format.
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        """This overrides the base revlog implementation to allow construction
        'manifest' types instead of manifestrevlog types. This is only needed
        until we migrate off the 'manifest' type."""
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

    def read(self, node):
        # Return the parsed manifest for 'node', using/populating _mancache.
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            cached = self._mancache[node]
            # ctx objects may share this cache; unwrap them to raw manifests.
            if (isinstance(cached, manifestctx) or
                isinstance(cached, treemanifestctx)):
                cached = cached.read()
            return cached
        if self._treeondisk:
            def gettext():
                return self.revision(node)
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.read(gettext, readsubtree)
            m.setnode(node)
            arraytext = None
        else:
            text = self.revision(node)
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = m
        if arraytext is not None:
            self.fulltextcache[node] = arraytext
        return m

    def clearcaches(self):
        super(manifest, self).clearcaches()
        self._mancache.clear()
General Comments 0
You need to be logged in to leave comments. Login now