##// END OF EJS Templates
manifest: introduce manifestlog and manifestctx classes...
Durham Goode -
r29825:426d931e default
parent child Browse files
Show More
@@ -1,1973 +1,1977
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import errno
11 11 import hashlib
12 12 import inspect
13 13 import os
14 14 import random
15 15 import time
16 16 import weakref
17 17
18 18 from .i18n import _
19 19 from .node import (
20 20 hex,
21 21 nullid,
22 22 short,
23 23 wdirrev,
24 24 )
25 25 from . import (
26 26 bookmarks,
27 27 branchmap,
28 28 bundle2,
29 29 changegroup,
30 30 changelog,
31 31 cmdutil,
32 32 context,
33 33 dirstate,
34 34 encoding,
35 35 error,
36 36 exchange,
37 37 extensions,
38 38 filelog,
39 39 hook,
40 40 lock as lockmod,
41 41 manifest,
42 42 match as matchmod,
43 43 merge as mergemod,
44 44 namespaces,
45 45 obsolete,
46 46 pathutil,
47 47 peer,
48 48 phases,
49 49 pushkey,
50 50 repoview,
51 51 revset,
52 52 scmutil,
53 53 store,
54 54 subrepo,
55 55 tags as tagsmod,
56 56 transaction,
57 57 util,
58 58 )
59 59
# Module-level convenience aliases (kept for historical callers).
release = lockmod.release
urlerr = util.urlerr
urlreq = util.urlreq
63 63
class repofilecache(scmutil.filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    def __get__(self, repo, type=None):
        # Class-level access returns the descriptor itself.
        if repo is None:
            return self
        # Always read/write/delete the cached value on the unfiltered repo
        # so every filtered view shares a single cache entry.
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
76 76
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # Resolve the tracked file inside .hg/store rather than .hg/.
        return obj.sjoin(fname)
81 81
class unfilteredpropertycache(util.propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # Access on the unfiltered repo: compute and cache here.
            return super(unfilteredpropertycache, self).__get__(unfi)
        # Access through a filtered view: delegate to the unfiltered repo's
        # attribute so the value is computed/cached only once.
        return getattr(unfi, self.name)
90 90
class filteredpropertycache(util.propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # Cache on the exact object the lookup was made on (which may be a
        # filtered view); bypass any __setattr__ override.
        object.__setattr__(obj, self.name, value)
96 96
97 97
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    # Only instance __dict__ entries count: a cached property stores its
    # value there, so a plain class attribute does not qualify.
    return name in vars(unfi)
101 101
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # Swap the receiver for its unfiltered view before dispatching.
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
107 107
# Wire-protocol capabilities advertised by localpeer (the modern API) and
# by locallegacypeer (modern plus the pre-bundle 'changegroupsubset').
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
111 111
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=None):
        """Create a peer wrapping ``repo``.

        ``caps`` is the advertised capability set and defaults to
        ``moderncaps``.  ``None`` is used as the sentinel instead of the
        module-level set itself so the shared default cannot be mutated
        through a peer instance (mutable-default-argument pitfall).
        """
        if caps is None:
            caps = moderncaps
        peer.peerrepository.__init__(self)
        # Peers only ever see the 'served' view of the repository.
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG20' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.getunbundler(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            try:
                cg = exchange.readbundle(self.ui, cg, None)
                ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
                if util.safehasattr(ret, 'getchunks'):
                    # This is a bundle20 object, turn it into an unbundler.
                    # This little dance should be dropped eventually when the
                    # API is finally improved.
                    stream = util.chunkbuffer(ret.getchunks())
                    ret = bundle2.getunbundler(self.ui, stream)
                return ret
            except Exception as exc:
                # If the exception contains output salvaged from a bundle2
                # reply, we need to make sure it is printed before continuing
                # to fail. So we build a bundle2 with such output and consume
                # it directly.
                #
                # This is not very elegant but allows a "simple" solution for
                # issue4594
                output = getattr(exc, '_bundle2salvagedoutput', ())
                if output:
                    bundler = bundle2.bundle20(self._repo.ui)
                    for out in output:
                        bundler.addpart(out)
                    stream = util.chunkbuffer(bundler.getchunks())
                    b = bundle2.getunbundler(self.ui, stream)
                    bundle2.processbundle(self._repo, b)
                raise
        except error.PushRaced as exc:
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return cg.apply(self._repo, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
214 214
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        # Same as localpeer but advertises the legacy capability set.
        localpeer.__init__(self, repo, caps=legacycaps)

    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
233 233
class localrepository(object):

    # On-disk format requirements this class knows how to read and create.
    supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
                            'manifestv2'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # Requirements propagated to the store opener as revlog options.
    openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
    # Name of the active repoview filter; None means unfiltered.
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()
246 246
    def __init__(self, baseui, path=None, create=False):
        """Open the repository at ``path`` (create it when ``create``).

        Raises error.RepoError when the repository is missing (without
        ``create``) or already exists (with ``create``).
        """
        self.requirements = set()
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.nofsauditor = pathutil.pathauditor(self.root, self._checknested,
                                                realfs=False)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            # Per-repo configuration may enable extensions; a missing hgrc
            # is not an error.
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                # Only run setup functions owned by enabled extensions.
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                self.requirements = newreporequirements(self)

                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)

                if 'store' in self.requirements:
                    self.vfs.mkdir("store")

                # create an invalid changelog
                self.vfs.append(
                    "00changelog.i",
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                self.requirements = scmutil.readrequires(
                    self.vfs, self.supported)
            except IOError as inst:
                # A missing requires file means an old, requirement-less repo.
                if inst.errno != errno.ENOENT:
                    raise

        self.sharedpath = self.path
        try:
            # Shared repos store the real store location in .hg/sharedpath.
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError as inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(
            self.requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyopenerreqs()
        if create:
            self._writerequirements()

        self._dirstatevalidatewarned = False

        self._branchcaches = {}
        self._revbranchcache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
359 359
    def close(self):
        # Flush in-memory caches to disk before the repo object goes away.
        self._writecaches()
362 362
    def _writecaches(self):
        # Persist the rev-branch cache if it was ever instantiated.
        if self._revbranchcache:
            self._revbranchcache.write()
366 366
    def _restrictcapabilities(self, caps):
        """Return the capability set to advertise for this repo."""
        if self.ui.configbool('experimental', 'bundle2-advertise', True):
            # Copy before mutating so the caller's set is untouched.
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2=' + urlreq.quote(capsblob))
        return caps
373 373
    def _applyopenerreqs(self):
        """Translate repo requirements and config into store vfs options."""
        self.svfs.options = dict((r, 1) for r in self.requirements
                                 if r in self.openerreqs)
        # experimental config: format.chunkcachesize
        chunkcachesize = self.ui.configint('format', 'chunkcachesize')
        if chunkcachesize is not None:
            self.svfs.options['chunkcachesize'] = chunkcachesize
        # experimental config: format.maxchainlen
        maxchainlen = self.ui.configint('format', 'maxchainlen')
        if maxchainlen is not None:
            self.svfs.options['maxchainlen'] = maxchainlen
        # experimental config: format.manifestcachesize
        manifestcachesize = self.ui.configint('format', 'manifestcachesize')
        if manifestcachesize is not None:
            self.svfs.options['manifestcachesize'] = manifestcachesize
        # experimental config: format.aggressivemergedeltas
        aggressivemergedeltas = self.ui.configbool('format',
            'aggressivemergedeltas', False)
        self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
        self.svfs.options['lazydeltabase'] = not scmutil.gddeltaconfig(self.ui)
394 394
    def _writerequirements(self):
        # Write self.requirements out to .hg/requires.
        scmutil.writerequires(self.vfs, self.requirements)
397 397
    def _checknested(self, path):
        """Determine if path is a legal nested repository."""
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    # The path itself is a registered subrepo: legal.
                    return True
                else:
                    # Path lives inside a subrepo; let that subrepo decide.
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # Walk up one directory level and try again.
                parts.pop()
        return False
435 435
    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle
438 438
    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self
444 444
    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)
452 452
    @repofilecache('bookmarks', 'bookmarks.current')
    def _bookmarks(self):
        # Bookmark store, invalidated when either bookmarks file changes.
        return bookmarks.bmstore(self)
456 456
    @property
    def _activebookmark(self):
        # Name of the active bookmark, delegated to the bookmark store.
        return self._bookmarks.active
460 460
461 461 def bookmarkheads(self, bookmark):
462 462 name = bookmark.split('@', 1)[0]
463 463 heads = []
464 464 for mark, n in self._bookmarks.iteritems():
465 465 if mark.split('@', 1)[0] == name:
466 466 heads.append(n)
467 467 return heads
468 468
    # _phaserevs and _phasesets depend on changelog. what we need is to
    # call _phasecache.invalidate() if '00changelog.i' was changed, but it
    # can't be easily expressed in filecache mechanism.
    @storecache('phaseroots', '00changelog.i')
    def _phasecache(self):
        return phases.phasecache(self, self._phasedefaults)
475 475
    @storecache('obsstore')
    def obsstore(self):
        """The obsolescence-marker store (read-only unless enabled)."""
        # read default format for new obsstore.
        # developer config: format.obsstore-version
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # Markers exist on disk but the feature is off: warn the user.
            self.ui.warn(
                _('obsolete feature not enabled but %i markers found!\n')
                % len(list(store)))
        return store
493 493
    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # A transaction is in progress (e.g. pretxn hooks): also read
            # the pending changelog data if it belongs to this repo.
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c
502 502
    @storecache('00manifest.i')
    def manifest(self):
        # Legacy manifest revlog accessor.
        return manifest.manifest(self.svfs)
506 506
    @storecache('00manifest.i')
    def manifestlog(self):
        # Newer manifest access interface; built on top of self.manifest so
        # both properties are invalidated together via the same store file.
        return manifest.manifestlog(self.svfs, self.manifest)
510
    @repofilecache('dirstate')
    def dirstate(self):
        # Working-directory state; parents are validated lazily.
        return dirstate.dirstate(self.vfs, self.ui, self.root,
                                 self._dirstatevalidate)
511 515
    def _dirstatevalidate(self, node):
        """Return ``node`` if it is a known changeset, else nullid.

        Warns (once per repo instance) when the dirstate references an
        unknown working-directory parent.
        """
        try:
            self.changelog.rev(node)
            return node
        except error.LookupError:
            if not self._dirstatevalidatewarned:
                self._dirstatevalidatewarned = True
                self.ui.warn(_("warning: ignoring unknown"
                               " working parent %s!\n") % short(node))
            return nullid
522 526
    def __getitem__(self, changeid):
        """Return the context for ``changeid`` (None -> working context)."""
        if changeid is None or changeid == wdirrev:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # Slice of revisions: skip filtered ones.
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)
531 535
532 536 def __contains__(self, changeid):
533 537 try:
534 538 self[changeid]
535 539 return True
536 540 except error.RepoLookupError:
537 541 return False
538 542
    def __nonzero__(self):
        # A repo object is always truthy, even when empty (Python 2 hook).
        return True
541 545
    def __len__(self):
        # Number of changesets (including filtered ones in the changelog).
        return len(self.changelog)
544 548
    def __iter__(self):
        # Iterate over revision numbers.
        return iter(self.changelog)
547 551
    def revs(self, expr, *args):
        '''Find revisions matching a revset.

        The revset is specified as a string ``expr`` that may contain
        %-formatting to escape certain types. See ``revset.formatspec``.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.

        Returns a revset.abstractsmartset, which is a list-like interface
        that contains integer revisions.
        '''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self)
563 567
    def set(self, expr, *args):
        '''Find revisions matching a revset and emit changectx instances.

        This is a convenience wrapper around ``revs()`` that iterates the
        result and is a generator of changectx instances.

        Revset aliases from the configuration are not expanded. To expand
        user aliases, consider calling ``scmutil.revrange()``.
        '''
        for r in self.revs(expr, *args):
            yield self[r]
575 579
    def url(self):
        # Local repositories are addressed with the file: scheme.
        return 'file:' + self.root
578 582
    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
587 591
    @unfilteredmethod
    def _tag(self, names, node, message, local, user, date, extra=None,
             editor=False):
        """Record one or more tags for ``node``.

        Local tags are appended to .hg/localtags; global tags are appended
        to .hgtags and a changeset is committed.  'pretag' and 'tag' hooks
        are run around the operation.  Returns the tagging changeset node
        (None for local tags).
        """
        if isinstance(names, str):
            names = (names,)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # Append '<hex-node> <name>' lines to an already-open tags file,
            # first recording the old value so the history is preserved.
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                if munge:
                    m = munge(name)
                else:
                    m = name

                if (self._tagscache.tagtypes and
                    name in self._tagscache.tagtypes):
                    old = self.tags().get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.vfs('localtags', 'r+')
            except IOError:
                fp = self.vfs('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError as e:
            if e.errno != errno.ENOENT:
                raise
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        self.invalidatecaches()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m,
                              editor=editor)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
661 665
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            m = matchmod.exact(self.root, '', ['.hgtags'])
            # Refuse to commit over uncommitted .hgtags changes.
            if any(self.status(match=m, unknown=True, ignored=True)):
                raise error.Abort(_('working copy of .hgtags is changed'),
                                  hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)
691 695
    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                # Derived caches, filled lazily by nodetags()/tagslist().
                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
714 718
    def tags(self):
        '''return a mapping of tag to node'''
        t = {}
        if self.changelog.filteredrevs:
            # Filtered view: the shared cache may point at hidden changesets,
            # so recompute rather than use _tagscache.
            tags, tt = self._findtags()
        else:
            tags = self._tagscache.tags
        for k, v in tags.iteritems():
            try:
                # ignore tags to unknown nodes
                self.changelog.rev(v)
                t[k] = v
            except (error.LookupError, ValueError):
                pass
        return t
730 734
    def _findtags(self):
        '''Do the hard work of finding tags. Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use? Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type? Should there
        # be one tagtype for all such "virtual" tags? Or is the status
        # quo fine?

        alltags = {} # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts. Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
763 767
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)
774 778
775 779 def tagslist(self):
776 780 '''return a list of tags ordered by revision'''
777 781 if not self._tagscache.tagslist:
778 782 l = []
779 783 for t, n in self.tags().iteritems():
780 784 l.append((self.changelog.rev(n), t, n))
781 785 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
782 786
783 787 return self._tagscache.tagslist
784 788
    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # Build the reverse (node -> sorted tag names) mapping lazily.
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
795 799
796 800 def nodebookmarks(self, node):
797 801 """return the list of bookmarks pointing to the specified node"""
798 802 marks = []
799 803 for bookmark, n in self._bookmarks.iteritems():
800 804 if n == node:
801 805 marks.append(bookmark)
802 806 return sorted(marks)
803 807
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        # Refresh the per-filter cache before returning it.
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]
809 813
    @unfilteredmethod
    def revbranchcache(self):
        # Lazily create the rev -> branch cache on the unfiltered repo.
        if not self._revbranchcache:
            self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
        return self._revbranchcache
815 819
    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                pass
831 835
    def lookup(self, key):
        # Resolve any changeset identifier to its binary node.
        return self[key].node()
834 838
835 839 def lookupbranch(self, key, remote=None):
836 840 repo = remote or self
837 841 if key in repo.branchmap():
838 842 return key
839 843
840 844 repo = (remote and remote.local()) and remote or self
841 845 return repo[key].branch()
842 846
843 847 def known(self, nodes):
844 848 cl = self.changelog
845 849 nm = cl.nodemap
846 850 filtered = cl.filteredrevs
847 851 result = []
848 852 for n in nodes:
849 853 r = nm.get(n)
850 854 resp = not (r is None or r in filtered)
851 855 result.append(resp)
852 856 return result
853 857
    def local(self):
        # Local repos return themselves (remote peers return None/False).
        return self
856 860
    def publishing(self):
        # it's safe (and desirable) to trust the publish flag unconditionally
        # so that we don't finalize changes shared between users via ssh or nfs
        return self.ui.configbool('phases', 'publish', True, untrusted=True)
861 865
    def cancopy(self):
        """True if this repo may be cloned by direct file copy."""
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.publishing():
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
870 874
    def shared(self):
        '''the type of shared repository (None if not shared)'''
        if self.sharedpath != self.path:
            return 'store'
        return None
876 880
    def join(self, f, *insidef):
        # Join a path relative to .hg/.
        return self.vfs.join(os.path.join(f, *insidef))
879 883
    def wjoin(self, f, *insidef):
        # Join a path relative to the working directory root.
        return self.wvfs.reljoin(self.root, f, *insidef)
882 886
    def file(self, f):
        """Return the filelog for tracked file ``f``.

        A single leading '/' is stripped; ``f`` must be non-empty.
        """
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)
887 891
    def changectx(self, changeid):
        # Alias for __getitem__.
        return self[changeid]
890 894
    def setparents(self, p1, p2=nullid):
        """Set the dirstate parents, preserving relevant copy records."""
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            # Dropping the second parent: discard copy records whose source
            # and destination are both absent from the remaining parent.
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()
907 911
908 912 def filectx(self, path, changeid=None, fileid=None):
909 913 """changeid can be a changeset revision, node, or tag.
910 914 fileid can be a file revision or node."""
911 915 return context.filectx(self, path, changeid, fileid)
912 916
913 917 def getcwd(self):
914 918 return self.dirstate.getcwd()
915 919
916 920 def pathto(self, f, cwd=None):
917 921 return self.dirstate.pathto(f, cwd)
918 922
919 923 def wfile(self, f, mode='r'):
920 924 return self.wvfs(f, mode)
921 925
922 926 def _link(self, f):
923 927 return self.wvfs.islink(f)
924 928
    def _loadfilter(self, filter):
        """Load and cache the (matcher, function, params) triples for the
        named filter config section (e.g. 'encode' or 'decode')."""
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' explicitly disables filtering for this pattern
                    continue
                mf = matchmod.match(self.root, '', [pat])
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        # registered in-process filter; the rest of the
                        # command string becomes its parameters
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    # fall back to piping the data through a shell command
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l
        return self.filterpats[filter]
948 952
949 953 def _filter(self, filterpats, filename, data):
950 954 for mf, fn, cmd in filterpats:
951 955 if mf(filename):
952 956 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
953 957 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
954 958 break
955 959
956 960 return data
957 961
958 962 @unfilteredpropertycache
959 963 def _encodefilterpats(self):
960 964 return self._loadfilter('encode')
961 965
962 966 @unfilteredpropertycache
963 967 def _decodefilterpats(self):
964 968 return self._loadfilter('decode')
965 969
966 970 def adddatafilter(self, name, filter):
967 971 self._datafilters[name] = filter
968 972
969 973 def wread(self, filename):
970 974 if self._link(filename):
971 975 data = self.wvfs.readlink(filename)
972 976 else:
973 977 data = self.wvfs.read(filename)
974 978 return self._filter(self._encodefilterpats, filename, data)
975 979
976 980 def wwrite(self, filename, data, flags, backgroundclose=False):
977 981 """write ``data`` into ``filename`` in the working directory
978 982
979 983 This returns length of written (maybe decoded) data.
980 984 """
981 985 data = self._filter(self._decodefilterpats, filename, data)
982 986 if 'l' in flags:
983 987 self.wvfs.symlink(data, filename)
984 988 else:
985 989 self.wvfs.write(filename, data, backgroundclose=backgroundclose)
986 990 if 'x' in flags:
987 991 self.wvfs.setflags(filename, False, True)
988 992 return len(data)
989 993
990 994 def wwritedata(self, filename, data):
991 995 return self._filter(self._decodefilterpats, filename, data)
992 996
993 997 def currenttransaction(self):
994 998 """return the current transaction or None if non exists"""
995 999 if self._transref:
996 1000 tr = self._transref()
997 1001 else:
998 1002 tr = None
999 1003
1000 1004 if tr and tr.running():
1001 1005 return tr
1002 1006 return None
1003 1007
    def transaction(self, desc, report=None):
        """Open a new transaction on the store, or nest into a running one.

        ``desc`` names the transaction for hooks and journal metadata;
        ``report`` replaces ``ui.warn`` for rollback reporting. A store
        lock must already be held by the caller. Returns the transaction
        object.
        """
        if (self.ui.configbool('devel', 'all-warnings')
            or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is None:
                raise RuntimeError('programming error: transaction requires '
                                   'locking')
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        # unique id for this transaction, passed to hooks
        idbase = "%.40f#%f" % (random.random(), time.time())
        txnid = 'TXN:' + hashlib.sha1(idbase).hexdigest()
        self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        if report:
            rp = report
        else:
            rp = self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        # we must avoid cyclic reference between repo and transaction.
        reporef = weakref.ref(self)
        def validate(tr):
            """will run pre-closing hooks"""
            reporef().hook('pretxnclose', throw=True,
                           txnname=desc, **tr.hookargs)
        def releasefn(tr, success):
            repo = reporef()
            if success:
                # this should be explicitly invoked here, because
                # in-memory changes aren't written out at closing
                # transaction, if tr.addfilegenerator (via
                # dirstate.write or so) isn't invoked while
                # transaction running
                repo.dirstate.write(None)
            else:
                # discard all changes (including ones already written
                # out) in this transaction
                repo.dirstate.restorebackup(None, prefix='journal.')

                repo.invalidate(clearfilecache=True)

        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode,
                                     validator=validate,
                                     releasefn=releasefn)

        tr.hookargs['txnid'] = txnid
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        def txnclosehook(tr2):
            """To be run if transaction is successful, will schedule a hook run
            """
            # Don't reference tr2 in hook() so we don't hold a reference.
            # This reduces memory consumption when there are multiple
            # transactions per lock. This can likely go away if issue5045
            # fixes the function accumulation.
            hookargs = tr2.hookargs

            def hook():
                reporef().hook('txnclose', throw=False, txnname=desc,
                               **hookargs)
            reporef()._afterlock(hook)
        tr.addfinalize('txnclose-hook', txnclosehook)
        def txnaborthook(tr2):
            """To be run if transaction is aborted
            """
            reporef().hook('txnabort', throw=False, txnname=desc,
                           **tr2.hookargs)
        tr.addabort('txnabort-hook', txnaborthook)
        # avoid eager cache invalidation. in-memory data should be identical
        # to stored data if transaction has no error.
        tr.addpostclose('refresh-filecachestats', self._refreshfilecachestats)
        self._transref = weakref.ref(tr)
        return tr
1091 1095
1092 1096 def _journalfiles(self):
1093 1097 return ((self.svfs, 'journal'),
1094 1098 (self.vfs, 'journal.dirstate'),
1095 1099 (self.vfs, 'journal.branch'),
1096 1100 (self.vfs, 'journal.desc'),
1097 1101 (self.vfs, 'journal.bookmarks'),
1098 1102 (self.svfs, 'journal.phaseroots'))
1099 1103
1100 1104 def undofiles(self):
1101 1105 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1102 1106
    def _writejournal(self, desc):
        """Snapshot dirstate, branch, repo length/description, bookmarks
        and phaseroots into journal.* files so the transaction can be
        rolled back."""
        self.dirstate.savebackup(None, prefix='journal.')
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
1113 1117
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        with self.lock():
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
1127 1131
    def rollback(self, dryrun=False, force=False):
        """Undo the last committed transaction (see _rollback).

        Returns 0 on success, 1 when no rollback information exists.
        """
        wlock = lock = dsguard = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                # guard the dirstate so a failed rollback restores it
                dsguard = cmdutil.dirstateguard(self, 'rollback')

                return self._rollback(dryrun, force, dsguard)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(dsguard, lock, wlock)
1142 1146
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force, dsguard):
        """Restore the repository to the state recorded in the undo files.

        ``dryrun`` only reports what would happen; ``force`` permits
        rolling back a commit that is not the working directory parent;
        ``dsguard`` is the dirstateguard protecting the dirstate backup.
        Returns 0 on success.
        """
        ui = self.ui
        try:
            # undo.desc holds "<old repo length>\n<transaction desc>\n[detail]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise error.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks', checkambig=True)
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots', checkambig=True)
        self.invalidate()

        # only restore dirstate/branch if a working-directory parent
        # was actually stripped by the rollback
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            # prevent dirstateguard from overwriting already restored one
            dsguard.close()

            self.dirstate.restorebackup(None, prefix='undo.')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

            parents = tuple([p.rev() for p in self[None].parents()])
            if len(parents) > 1:
                ui.status(_('working directory now based on '
                            'revisions %d and %d\n') % parents)
            else:
                ui.status(_('working directory now based on '
                            'revision %d\n') % parents)
            mergemod.mergestate.clean(self, self['.'].node())

        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1213 1217
1214 1218 def invalidatecaches(self):
1215 1219
1216 1220 if '_tagscache' in vars(self):
1217 1221 # can't use delattr on proxy
1218 1222 del self.__dict__['_tagscache']
1219 1223
1220 1224 self.unfiltered()._branchcaches.clear()
1221 1225 self.invalidatevolatilesets()
1222 1226
1223 1227 def invalidatevolatilesets(self):
1224 1228 self.filteredrevcache.clear()
1225 1229 obsolete.clearobscaches(self)
1226 1230
1227 1231 def invalidatedirstate(self):
1228 1232 '''Invalidates the dirstate, causing the next call to dirstate
1229 1233 to check if it was modified since the last time it was read,
1230 1234 rereading it if it has.
1231 1235
1232 1236 This is different to dirstate.invalidate() that it doesn't always
1233 1237 rereads the dirstate. Use dirstate.invalidate() if you want to
1234 1238 explicitly read the dirstate again (i.e. restoring it to a previous
1235 1239 known good state).'''
1236 1240 if hasunfilteredcache(self, 'dirstate'):
1237 1241 for k in self.dirstate._filecache:
1238 1242 try:
1239 1243 delattr(self.dirstate, k)
1240 1244 except AttributeError:
1241 1245 pass
1242 1246 delattr(self.unfiltered(), 'dirstate')
1243 1247
    def invalidate(self, clearfilecache=False):
        """Drop cached file-backed properties so they are reread from
        disk on next access.

        With ``clearfilecache`` the filecache entries themselves are
        removed as well, not just the cached attribute values.
        """
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache.keys():
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            if clearfilecache:
                del self._filecache[k]
            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1259 1263
1260 1264 def invalidateall(self):
1261 1265 '''Fully invalidates both store and non-store parts, causing the
1262 1266 subsequent operation to reread any outside changes.'''
1263 1267 # extension should hook this to invalidate its caches
1264 1268 self.invalidate()
1265 1269 self.invalidatedirstate()
1266 1270
1267 1271 def _refreshfilecachestats(self, tr):
1268 1272 """Reload stats of cached files so that they are flagged as valid"""
1269 1273 for k, ce in self._filecache.items():
1270 1274 if k == 'dirstate' or k not in self.__dict__:
1271 1275 continue
1272 1276 ce.refresh()
1273 1277
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc,
              inheritchecker=None, parentenvvar=None):
        """Acquire ``lockname`` inside ``vfs`` and return the lock object.

        First try a non-blocking acquisition; if the lock is held and
        ``wait`` is true, warn and retry with a timeout taken from
        ui.timeout (default 600 seconds).
        """
        parentlock = None
        # the contents of parentenvvar are used by the underlying lock to
        # determine whether it can be inherited
        if parentenvvar is not None:
            parentlock = os.environ.get(parentenvvar)
        try:
            l = lockmod.lock(vfs, lockname, 0, releasefn=releasefn,
                             acquirefn=acquirefn, desc=desc,
                             inheritchecker=inheritchecker,
                             parentlock=parentlock)
        except error.LockHeld as inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn=releasefn, acquirefn=acquirefn,
                             desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        return l
1298 1302
1299 1303 def _afterlock(self, callback):
1300 1304 """add a callback to be run when the repository is fully unlocked
1301 1305
1302 1306 The callback will be executed when the outermost lock is released
1303 1307 (with wlock being higher level than 'lock')."""
1304 1308 for ref in (self._wlockref, self._lockref):
1305 1309 l = ref and ref()
1306 1310 if l and l.held:
1307 1311 l.postrelease.append(callback)
1308 1312 break
1309 1313 else: # no lock have been found.
1310 1314 callback()
1311 1315
1312 1316 def lock(self, wait=True):
1313 1317 '''Lock the repository store (.hg/store) and return a weak reference
1314 1318 to the lock. Use this before modifying the store (e.g. committing or
1315 1319 stripping). If you are opening a transaction, get a lock as well.)
1316 1320
1317 1321 If both 'lock' and 'wlock' must be acquired, ensure you always acquires
1318 1322 'wlock' first to avoid a dead-lock hazard.'''
1319 1323 l = self._currentlock(self._lockref)
1320 1324 if l is not None:
1321 1325 l.lock()
1322 1326 return l
1323 1327
1324 1328 l = self._lock(self.svfs, "lock", wait, None,
1325 1329 self.invalidate, _('repository %s') % self.origroot)
1326 1330 self._lockref = weakref.ref(l)
1327 1331 return l
1328 1332
1329 1333 def _wlockchecktransaction(self):
1330 1334 if self.currenttransaction() is not None:
1331 1335 raise error.LockInheritanceContractViolation(
1332 1336 'wlock cannot be inherited in the middle of a transaction')
1333 1337
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.

        Use this before modifying files in .hg.

        If both 'lock' and 'wlock' must be acquired, ensure you always acquires
        'wlock' first to avoid a dead-lock hazard.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-enter the already-held lock
            l.lock()
            return l

        # We do not need to check for non-waiting lock acquisition. Such
        # acquisition would not cause dead-lock as they would just fail.
        if wait and (self.ui.configbool('devel', 'all-warnings')
                     or self.ui.configbool('devel', 'check-locks')):
            if self._currentlock(self._lockref) is not None:
                self.ui.develwarn('"wlock" acquired after "lock"')

        def unlock():
            # on release, write pending dirstate changes out, unless an
            # unfinished parent change left it in an inconsistent state
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write(None)

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot,
                       inheritchecker=self._wlockchecktransaction,
                       parentenvvar='HG_WLOCK_LOCKER')
        self._wlockref = weakref.ref(l)
        return l
1369 1373
1370 1374 def _currentlock(self, lockref):
1371 1375 """Returns the lock if it's held, or None if it's not."""
1372 1376 if lockref is None:
1373 1377 return None
1374 1378 l = lockref()
1375 1379 if l is None or not l.held:
1376 1380 return None
1377 1381 return l
1378 1382
1379 1383 def currentwlock(self):
1380 1384 """Returns the wlock if it's held, or None if it's not."""
1381 1385 return self._currentlock(self._wlockref)
1382 1386
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the filenode to record in the new manifest. Files whose
        content or flags actually changed are appended to ``changelist``;
        copy/rename metadata is stored in the new filelog revision.
        """

        fname = fctx.path()
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)
        if isinstance(fctx, context.filectx):
            # the file already exists in a filelog revision; reuse it if
            # it is one of the parents
            node = fctx.filenode()
            if node in [fparent1, fparent2]:
                self.ui.debug('reusing %s filelog entry\n' % fname)
                if manifest1.flags(fname) != fctx.flags():
                    changelist.append(fname)
                return node

        flog = self.file(fname)
        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent directory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        text = fctx.data()
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1470 1474
1471 1475 def checkcommitpatterns(self, wctx, vdirs, match, status, fail):
1472 1476 """check for commit arguments that aren't commitable"""
1473 1477 if match.isexact() or match.prefix():
1474 1478 matched = set(status.modified + status.added + status.removed)
1475 1479
1476 1480 for f in match.files():
1477 1481 f = self.dirstate.normalize(f)
1478 1482 if f == '.' or f in matched or f in wctx.substate:
1479 1483 continue
1480 1484 if f in status.deleted:
1481 1485 fail(f, _('file not found!'))
1482 1486 if f in vdirs: # visited directory
1483 1487 d = f + '/'
1484 1488 for mf in matched:
1485 1489 if mf.startswith(d):
1486 1490 break
1487 1491 else:
1488 1492 fail(f, _("no match under directory!"))
1489 1493 elif f not in self.dirstate:
1490 1494 fail(f, _("file not tracked!"))
1491 1495
    @unfilteredmethod
    def commit(self, text="", user=None, date=None, match=None, force=False,
               editor=False, extra=None):
        """Add a new revision to current repository.

        Revision information is gathered from the working directory,
        match can be used to filter the committed files. If editor is
        supplied, it is called to get a commit message.

        Returns the new changeset node, or None when there was nothing
        to commit.
        """
        if extra is None:
            extra = {}

        def fail(f, msg):
            # abort on any problematic explicit file argument
            raise error.Abort('%s: %s' % (f, msg))

        if not match:
            match = matchmod.always(self.root, '')

        if not force:
            vdirs = []
            match.explicitdir = vdirs.append
            match.bad = fail

        wlock = lock = tr = None
        try:
            wlock = self.wlock()
            lock = self.lock() # for recent changelog (see issue4368)

            wctx = self[None]
            merge = len(wctx.parents()) > 1

            if not force and merge and match.ispartial():
                raise error.Abort(_('cannot partially commit a merge '
                                    '(do not specify files or patterns)'))

            status = self.status(match=match, clean=force)
            if force:
                status.modified.extend(status.clean) # mq may commit clean files

            # check subrepos
            subs = []
            commitsubs = set()
            newstate = wctx.substate.copy()
            # only manage subrepos and .hgsubstate if .hgsub is present
            if '.hgsub' in wctx:
                # we'll decide whether to track this ourselves, thanks
                for c in status.modified, status.added, status.removed:
                    if '.hgsubstate' in c:
                        c.remove('.hgsubstate')

                # compare current state to last committed state
                # build new substate based on last committed state
                oldstate = wctx.p1().substate
                for s in sorted(newstate.keys()):
                    if not match(s):
                        # ignore working copy, use old state if present
                        if s in oldstate:
                            newstate[s] = oldstate[s]
                            continue
                        if not force:
                            raise error.Abort(
                                _("commit with new subrepo %s excluded") % s)
                    dirtyreason = wctx.sub(s).dirtyreason(True)
                    if dirtyreason:
                        if not self.ui.configbool('ui', 'commitsubrepos'):
                            raise error.Abort(dirtyreason,
                                hint=_("use --subrepos for recursive commit"))
                        subs.append(s)
                        commitsubs.add(s)
                    else:
                        bs = wctx.sub(s).basestate()
                        newstate[s] = (newstate[s][0], bs, newstate[s][2])
                        if oldstate.get(s, (None, None, None))[1] != bs:
                            subs.append(s)

                # check for removed subrepos
                for p in wctx.parents():
                    r = [s for s in p.substate if s not in newstate]
                    subs += [s for s in r if match(s)]
                if subs:
                    if (not match('.hgsub') and
                        '.hgsub' in (wctx.modified() + wctx.added())):
                        raise error.Abort(
                            _("can't commit subrepos without .hgsub"))
                    status.modified.insert(0, '.hgsubstate')

            elif '.hgsub' in status.removed:
                # clean up .hgsubstate when .hgsub is removed
                if ('.hgsubstate' in wctx and
                    '.hgsubstate' not in (status.modified + status.added +
                                          status.removed)):
                    status.removed.insert(0, '.hgsubstate')

            # make sure all explicit patterns are matched
            if not force:
                self.checkcommitpatterns(wctx, vdirs, match, status, fail)

            cctx = context.workingcommitctx(self, status,
                                            text, user, date, extra)

            # internal config: ui.allowemptycommit
            allowemptycommit = (wctx.branch() != wctx.p1().branch()
                                or extra.get('close') or merge or cctx.files()
                                or self.ui.configbool('ui', 'allowemptycommit'))
            if not allowemptycommit:
                return None

            if merge and cctx.deleted():
                raise error.Abort(_("cannot commit merge with missing files"))

            ms = mergemod.mergestate.read(self)

            if list(ms.unresolved()):
                raise error.Abort(_('unresolved merge conflicts '
                                    '(see "hg help resolve")'))
            if ms.mdstate() != 's' or list(ms.driverresolved()):
                raise error.Abort(_('driver-resolved merge conflicts'),
                                  hint=_('run "hg resolve --all" to resolve'))

            if editor:
                cctx._text = editor(self, cctx, subs)
            edited = (text != cctx._text)

            # Save commit message in case this transaction gets rolled back
            # (e.g. by a pretxncommit hook).  Leave the content alone on
            # the assumption that the user will use the same editor again.
            msgfn = self.savecommitmessage(cctx._text)

            # commit subs and write new state
            if subs:
                for s in sorted(commitsubs):
                    sub = wctx.sub(s)
                    self.ui.status(_('committing subrepository %s\n') %
                                   subrepo.subrelpath(sub))
                    sr = sub.commit(cctx._text, user, date)
                    newstate[s] = (newstate[s][0], sr)
                subrepo.writestate(self, newstate)

            p1, p2 = self.dirstate.parents()
            hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
            try:
                self.hook("precommit", throw=True, parent1=hookp1,
                          parent2=hookp2)
                tr = self.transaction('commit')
                ret = self.commitctx(cctx, True)
            except: # re-raises
                if edited:
                    self.ui.write(
                        _('note: commit message saved in %s\n') % msgfn)
                raise
            # update bookmarks, dirstate and mergestate
            bookmarks.update(self, [p1, p2], ret)
            cctx.markcommitted(ret)
            ms.reset()
            tr.close()

        finally:
            lockmod.release(tr, lock, wlock)

        def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
            # hack for command that use a temporary commit (eg: histedit)
            # temporary commit got stripped before hook release
            if self.changelog.hasnode(ret):
                self.hook("commit", node=node, parent1=parent1,
                          parent2=parent2)
        self._afterlock(commithook)
        return ret
1659 1663
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        Commits each changed file, then the manifest, then the changelog
        entry, inside its own transaction. Returns the new changeset
        node. With ``error`` set, non-ENOENT IOErrors while committing a
        file are reported and re-raised.
        """

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError as inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError as inst:
                        # a missing file (ENOENT) is tolerated unless the
                        # caller asked for strict error reporting
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1745 1749
1746 1750 @unfilteredmethod
1747 1751 def destroying(self):
1748 1752 '''Inform the repository that nodes are about to be destroyed.
1749 1753 Intended for use by strip and rollback, so there's a common
1750 1754 place for anything that has to be done before destroying history.
1751 1755
1752 1756 This is mostly useful for saving state that is in memory and waiting
1753 1757 to be flushed when the current lock is released. Because a call to
1754 1758 destroyed is imminent, the repo will be invalidated causing those
1755 1759 changes to stay in memory (waiting for the next unlock), or vanish
1756 1760 completely.
1757 1761 '''
1758 1762 # When using the same lock to commit and strip, the phasecache is left
1759 1763 # dirty after committing. Then when we strip, the repo is invalidated,
1760 1764 # causing those changes to disappear.
1761 1765 if '_phasecache' in vars(self):
1762 1766 self._phasecache.write()
1763 1767
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        # (filterunknown prunes entries for now-missing nodes; write
        # persists the pruned cache.)
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1797 1801
1798 1802 def walk(self, match, node=None):
1799 1803 '''
1800 1804 walk recursively through the directory tree or a given
1801 1805 changeset, finding all files matched by the match
1802 1806 function
1803 1807 '''
1804 1808 return self[node].walk(match)
1805 1809
1806 1810 def status(self, node1='.', node2=None, match=None,
1807 1811 ignored=False, clean=False, unknown=False,
1808 1812 listsubrepos=False):
1809 1813 '''a convenience method that calls node1.status(node2)'''
1810 1814 return self[node1].status(node2, match, ignored, clean, unknown,
1811 1815 listsubrepos)
1812 1816
1813 1817 def heads(self, start=None):
1814 1818 heads = self.changelog.heads(start)
1815 1819 # sort the output in rev descending order
1816 1820 return sorted(heads, key=self.changelog.rev, reverse=True)
1817 1821
1818 1822 def branchheads(self, branch=None, start=None, closed=False):
1819 1823 '''return a (possibly filtered) list of heads for the given branch
1820 1824
1821 1825 Heads are returned in topological order, from newest to oldest.
1822 1826 If branch is None, use the dirstate branch.
1823 1827 If start is not None, return only heads reachable from start.
1824 1828 If closed is True, return heads that are marked as closed as well.
1825 1829 '''
1826 1830 if branch is None:
1827 1831 branch = self[None].branch()
1828 1832 branches = self.branchmap()
1829 1833 if branch not in branches:
1830 1834 return []
1831 1835 # the cache returns heads ordered lowest to highest
1832 1836 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1833 1837 if start is not None:
1834 1838 # filter out the heads that cannot be reached from startrev
1835 1839 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1836 1840 bheads = [h for h in bheads if h in fbheads]
1837 1841 return bheads
1838 1842
    def branches(self, nodes):
        # For each starting node (default: the tip), follow first parents
        # back until a merge or the root is reached, and record a tuple
        # (segment-tip, stop-node, p1, p2) for each walked segment.
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while True:
                p = self.changelog.parents(n)
                # stop at a merge (two real parents) or at the root
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b
1852 1856
1853 1857 def between(self, pairs):
1854 1858 r = []
1855 1859
1856 1860 for top, bottom in pairs:
1857 1861 n, l, i = top, [], 0
1858 1862 f = 1
1859 1863
1860 1864 while n != bottom and n != nullid:
1861 1865 p = self.changelog.parents(n)[0]
1862 1866 if i == f:
1863 1867 l.append(n)
1864 1868 f = f * 2
1865 1869 n = p
1866 1870 i += 1
1867 1871
1868 1872 r.append(l)
1869 1873
1870 1874 return r
1871 1875
1872 1876 def checkpush(self, pushop):
1873 1877 """Extensions can override this function if additional checks have
1874 1878 to be performed before pushing, or call it if they override push
1875 1879 command.
1876 1880 """
1877 1881 pass
1878 1882
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of a pushop with repo, remote, outgoing
        methods, which are called before pushing changesets.
        """
        # cached per (unfiltered) repo so extensions can register hooks once
        return util.hooks()
1885 1889
1886 1890 def pushkey(self, namespace, key, old, new):
1887 1891 try:
1888 1892 tr = self.currenttransaction()
1889 1893 hookargs = {}
1890 1894 if tr is not None:
1891 1895 hookargs.update(tr.hookargs)
1892 1896 hookargs['namespace'] = namespace
1893 1897 hookargs['key'] = key
1894 1898 hookargs['old'] = old
1895 1899 hookargs['new'] = new
1896 1900 self.hook('prepushkey', throw=True, **hookargs)
1897 1901 except error.HookAbort as exc:
1898 1902 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1899 1903 if exc.hint:
1900 1904 self.ui.write_err(_("(%s)\n") % exc.hint)
1901 1905 return False
1902 1906 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1903 1907 ret = pushkey.push(self, namespace, key, old, new)
1904 1908 def runhook():
1905 1909 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1906 1910 ret=ret)
1907 1911 self._afterlock(runhook)
1908 1912 return ret
1909 1913
    def listkeys(self, namespace):
        """Return the pushkey listing for *namespace*, firing list hooks."""
        # prelistkeys may veto the listing by raising (throw=True)
        self.hook('prelistkeys', throw=True, namespace=namespace)
        self.ui.debug('listing keys for "%s"\n' % namespace)
        values = pushkey.list(self, namespace)
        self.hook('listkeys', namespace=namespace, values=values)
        return values
1916 1920
1917 1921 def debugwireargs(self, one, two, three=None, four=None, five=None):
1918 1922 '''used to test argument passing over the wire'''
1919 1923 return "%s %s %s %s %s" % (one, two, three, four, five)
1920 1924
1921 1925 def savecommitmessage(self, text):
1922 1926 fp = self.vfs('last-message.txt', 'wb')
1923 1927 try:
1924 1928 fp.write(text)
1925 1929 finally:
1926 1930 fp.close()
1927 1931 return self.pathto(fp.name[len(self.root) + 1:])
1928 1932
# used to avoid circular references so destructors work
def aftertrans(files):
    """Return a callback that renames each (vfs, src, dest) in *files*.

    The triples are copied up front so the returned closure does not
    alias the caller's (possibly mutated) list; a rename whose source
    journal file does not exist yet is silently skipped.
    """
    pending = [tuple(entry) for entry in files]
    def renameall():
        for vfs, src, dest in pending:
            try:
                vfs.rename(src, dest)
            except OSError:  # journal file does not yet exist
                pass
    return renameall
1939 1943
def undoname(fn):
    """Map a journal file path to the corresponding undo file path."""
    directory, name = os.path.split(fn)
    # only journal files have undo counterparts
    assert name.startswith('journal')
    return os.path.join(directory, name.replace('journal', 'undo', 1))
1944 1948
def instance(ui, path, create):
    # repository factory entry point; path may be a file:// URL or a
    # plain filesystem path (urllocalpath normalizes either form)
    return localrepository(ui, util.urllocalpath(path), create)
1947 1951
def islocal(path):
    """Local repositories are, by definition, always local."""
    return True
1950 1954
def newreporequirements(repo):
    """Determine the set of requirements for a new local repository.

    Extensions can wrap this function to specify custom requirements for
    new repositories.
    """
    ui = repo.ui
    requirements = set(['revlogv1'])
    # NOTE(review): the fncache/dotencode checks are nested because each
    # format only makes sense on top of the previous one — nesting
    # reconstructed from upstream; confirm against the pristine file.
    if ui.configbool('format', 'usestore', True):
        requirements.add('store')
        if ui.configbool('format', 'usefncache', True):
            requirements.add('fncache')
            if ui.configbool('format', 'dotencode', True):
                requirements.add('dotencode')

    if scmutil.gdinitconfig(ui):
        requirements.add('generaldelta')
    # experimental on-disk formats, off by default
    if ui.configbool('experimental', 'treemanifest', False):
        requirements.add('treemanifest')
    if ui.configbool('experimental', 'manifestv2', False):
        requirements.add('manifestv2')

    return requirements
@@ -1,1122 +1,1186
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import array
11 11 import heapq
12 12 import os
13 13 import struct
14 14
15 15 from .i18n import _
16 16 from . import (
17 17 error,
18 18 mdiff,
19 19 parsers,
20 20 revlog,
21 21 util,
22 22 )
23 23
24 24 propertycache = util.propertycache
25 25
def _parsev1(data):
    """Yield (path, binary node, flags) tuples from a v1 manifest text."""
    # This method does a little bit of excessive-looking
    # precondition checking. This is so that the behavior of this
    # class exactly matches its C counterpart to try and help
    # prevent surprise breakage for anyone that develops against
    # the pure version.
    if data and data[-1] != '\n':
        raise ValueError('Manifest did not end in a newline.')
    prev = None
    for line in data.splitlines():
        if prev is not None and prev > line:
            raise ValueError('Manifest lines not in sorted order.')
        prev = line
        f, n = line.split('\0')
        # anything past the 40 hex digits of the node is the flags field
        flags = n[40:]
        if flags:
            yield f, revlog.bin(n[:40]), flags
        else:
            yield f, revlog.bin(n), ''
44 44
45 45 def _parsev2(data):
46 46 metadataend = data.find('\n')
47 47 # Just ignore metadata for now
48 48 pos = metadataend + 1
49 49 prevf = ''
50 50 while pos < len(data):
51 51 end = data.find('\n', pos + 1) # +1 to skip stem length byte
52 52 if end == -1:
53 53 raise ValueError('Manifest ended with incomplete file entry.')
54 54 stemlen = ord(data[pos])
55 55 items = data[pos + 1:end].split('\0')
56 56 f = prevf[:stemlen] + items[0]
57 57 if prevf > f:
58 58 raise ValueError('Manifest entries not in sorted order.')
59 59 fl = items[1]
60 60 # Just ignore metadata (items[2:] for now)
61 61 n = data[end + 1:end + 21]
62 62 yield f, n, fl
63 63 pos = end + 22
64 64 prevf = f
65 65
def _parse(data):
    """Generates (path, node, flags) tuples from a manifest text."""
    # v2 manifests are marked by a leading NUL before the metadata line
    parser = _parsev2 if data.startswith('\0') else _parsev1
    return iter(parser(data))
72 72
def _text(it, usemanifestv2):
    """Given an iterator over (path, node, flags) tuples, returns a manifest
    text"""
    return _textv2(it) if usemanifestv2 else _textv1(it)
80 80
def _textv1(it):
    """Serialize (path, node, flags) tuples as a v1 manifest text."""
    entries = list(it)
    _hex = revlog.hex
    # if this is changed to support newlines in filenames,
    # be sure to check the templates/ dir again (especially *-raw.tmpl)
    lines = ["%s\0%s%s\n" % (f, _hex(n), fl) for f, n, fl in entries]
    # validate after serializing, matching the original exception order
    _checkforbidden([f for f, _n, _fl in entries])
    return ''.join(lines)
93 93
def _textv2(it):
    """Serialize (path, node, flags) tuples as a v2 manifest text."""
    paths = []
    chunks = ['\0\n']   # empty metadata block
    lastpath = ''
    for f, n, fl in it:
        paths.append(f)
        # store only the suffix after the common stem with the previous
        # path; the stem length is a single byte, so cap it at 255
        stem = os.path.commonprefix([lastpath, f])
        stemlen = min(len(stem), 255)
        chunks.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
        lastpath = f
    _checkforbidden(paths)
    return ''.join(chunks)
106 106
107 107 class _lazymanifest(dict):
108 108 """This is the pure implementation of lazymanifest.
109 109
110 110 It has not been optimized *at all* and is not lazy.
111 111 """
112 112
113 113 def __init__(self, data):
114 114 dict.__init__(self)
115 115 for f, n, fl in _parse(data):
116 116 self[f] = n, fl
117 117
118 118 def __setitem__(self, k, v):
119 119 node, flag = v
120 120 assert node is not None
121 121 if len(node) > 21:
122 122 node = node[:21] # match c implementation behavior
123 123 dict.__setitem__(self, k, (node, flag))
124 124
125 125 def __iter__(self):
126 126 return iter(sorted(dict.keys(self)))
127 127
128 128 def iterkeys(self):
129 129 return iter(sorted(dict.keys(self)))
130 130
131 131 def iterentries(self):
132 132 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
133 133
134 134 def copy(self):
135 135 c = _lazymanifest('')
136 136 c.update(self)
137 137 return c
138 138
139 139 def diff(self, m2, clean=False):
140 140 '''Finds changes between the current manifest and m2.'''
141 141 diff = {}
142 142
143 143 for fn, e1 in self.iteritems():
144 144 if fn not in m2:
145 145 diff[fn] = e1, (None, '')
146 146 else:
147 147 e2 = m2[fn]
148 148 if e1 != e2:
149 149 diff[fn] = e1, e2
150 150 elif clean:
151 151 diff[fn] = None
152 152
153 153 for fn, e2 in m2.iteritems():
154 154 if fn not in self:
155 155 diff[fn] = (None, ''), e2
156 156
157 157 return diff
158 158
159 159 def filtercopy(self, filterfn):
160 160 c = _lazymanifest('')
161 161 for f, n, fl in self.iterentries():
162 162 if filterfn(f):
163 163 c[f] = n, fl
164 164 return c
165 165
166 166 def text(self):
167 167 """Get the full data of this manifest as a bytestring."""
168 168 return _textv1(self.iterentries())
169 169
# prefer the C implementation of lazymanifest when the parsers extension
# module provides it; pure-Python builds lack the attribute
try:
    _lazymanifest = parsers.lazymanifest
except AttributeError:
    pass
174 174
class manifestdict(object):
    """In-memory manifest mapping file path -> (nodeid, flags).

    Thin wrapper around a _lazymanifest (C or pure Python) that provides
    the dict-like interface the rest of the code expects.
    """
    def __init__(self, data=''):
        if data.startswith('\0'):
            #_lazymanifest can not parse v2
            self._lm = _lazymanifest('')
            for f, n, fl in _parsev2(data):
                self._lm[f] = n, fl
        else:
            self._lm = _lazymanifest(data)

    def __getitem__(self, key):
        # nodeid only; use find() for the (node, flags) pair
        return self._lm[key][0]

    def find(self, key):
        # (node, flags) for key; raises KeyError when absent
        return self._lm[key]

    def __len__(self):
        return len(self._lm)

    def __setitem__(self, key, node):
        # preserve any flags already recorded for key
        self._lm[key] = node, self.flags(key, '')

    def __contains__(self, key):
        return key in self._lm

    def __delitem__(self, key):
        del self._lm[key]

    def __iter__(self):
        return self._lm.__iter__()

    def iterkeys(self):
        return self._lm.iterkeys()

    def keys(self):
        return list(self.iterkeys())

    def filesnotin(self, m2):
        '''Set of files in this manifest that are not in the other'''
        diff = self.diff(m2)
        # hashflags[1][0] is m2's nodeid; None means absent from m2
        files = set(filepath
                    for filepath, hashflags in diff.iteritems()
                    if hashflags[1][0] is None)
        return files

    @propertycache
    def _dirs(self):
        return util.dirs(self)

    def dirs(self):
        return self._dirs

    def hasdir(self, dir):
        return dir in self._dirs

    def _filesfastpath(self, match):
        '''Checks whether we can correctly and quickly iterate over matcher
        files instead of over manifest files.'''
        files = match.files()
        return (len(files) < 100 and (match.isexact() or
            (match.prefix() and all(fn in self for fn in files))))

    def walk(self, match):
        '''Generates matching file names.

        Equivalent to manifest.matches(match).iterkeys(), but without creating
        an entirely new manifest.

        It also reports nonexistent files by marking them bad with match.bad().
        '''
        if match.always():
            for f in iter(self):
                yield f
            return

        fset = set(match.files())

        # avoid the entire walk if we're only looking for specific files
        if self._filesfastpath(match):
            for fn in sorted(fset):
                yield fn
            return

        for fn in self:
            if fn in fset:
                # specified pattern is the exact name
                fset.remove(fn)
            if match(fn):
                yield fn

        # for dirstate.walk, files=['.'] means "walk the whole tree".
        # follow that here, too
        fset.discard('.')

        # whatever is left in fset was never yielded: report it as bad
        # unless it names a directory that does exist
        for fn in sorted(fset):
            if not self.hasdir(fn):
                match.bad(fn, None)

    def matches(self, match):
        '''generate a new manifest filtered by the match argument'''
        if match.always():
            return self.copy()

        if self._filesfastpath(match):
            m = manifestdict()
            lm = self._lm
            for fn in match.files():
                if fn in lm:
                    m._lm[fn] = lm[fn]
            return m

        m = manifestdict()
        m._lm = self._lm.filtercopy(match)
        return m

    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        return self._lm.diff(m2._lm, clean)

    def setflag(self, key, flag):
        # key must already exist; self[key] raises otherwise
        self._lm[key] = self[key], flag

    def get(self, key, default=None):
        try:
            return self._lm[key][0]
        except KeyError:
            return default

    def flags(self, key, default=''):
        try:
            return self._lm[key][1]
        except KeyError:
            return default

    def copy(self):
        c = manifestdict()
        c._lm = self._lm.copy()
        return c

    def iteritems(self):
        # (path, node) pairs, dropping the flags field
        return (x[:2] for x in self._lm.iterentries())

    def iterentries(self):
        return self._lm.iterentries()

    def text(self, usemanifestv2=False):
        if usemanifestv2:
            return _textv2(self._lm.iterentries())
        else:
            # use (probably) native version for v1
            return self._lm.text()

    def fastdelta(self, base, changes):
        """Given a base manifest text as an array.array and a list of changes
        relative to that text, compute a delta that can be used by revlog.

        changes is a list of (path, todelete) pairs. Returns the new
        manifest text (as an array.array) and the binary delta.
        """
        delta = []
        dstart = None
        dend = None
        dline = [""]
        start = 0
        # zero copy representation of base as a buffer
        addbuf = util.buffer(base)

        changes = list(changes)
        if len(changes) < 1000:
            # start with a readonly loop that finds the offset of
            # each line and creates the deltas
            for f, todelete in changes:
                # bs will either be the index of the item or the insert point
                start, end = _msearch(addbuf, f, start)
                if not todelete:
                    h, fl = self._lm[f]
                    l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
                else:
                    if start == end:
                        # item we want to delete was not found, error out
                        raise AssertionError(
                                _("failed to remove %s from manifest") % f)
                    l = ""
                # merge adjacent/overlapping spans into one delta chunk
                if dstart is not None and dstart <= start and dend >= start:
                    if dend < end:
                        dend = end
                    if l:
                        dline.append(l)
                else:
                    if dstart is not None:
                        delta.append([dstart, dend, "".join(dline)])
                    dstart = start
                    dend = end
                    dline = [l]

            if dstart is not None:
                delta.append([dstart, dend, "".join(dline)])
            # apply the delta to the base, and get a delta for addrevision
            deltatext, arraytext = _addlistdelta(base, delta)
        else:
            # For large changes, it's much cheaper to just build the text and
            # diff it.
            arraytext = array.array('c', self.text())
            deltatext = mdiff.textdiff(base, arraytext)

        return arraytext, deltatext
391 391
392 392 def _msearch(m, s, lo=0, hi=None):
393 393 '''return a tuple (start, end) that says where to find s within m.
394 394
395 395 If the string is found m[start:end] are the line containing
396 396 that string. If start == end the string was not found and
397 397 they indicate the proper sorted insertion point.
398 398
399 399 m should be a buffer or a string
400 400 s is a string'''
401 401 def advance(i, c):
402 402 while i < lenm and m[i] != c:
403 403 i += 1
404 404 return i
405 405 if not s:
406 406 return (lo, lo)
407 407 lenm = len(m)
408 408 if not hi:
409 409 hi = lenm
410 410 while lo < hi:
411 411 mid = (lo + hi) // 2
412 412 start = mid
413 413 while start > 0 and m[start - 1] != '\n':
414 414 start -= 1
415 415 end = advance(start, '\0')
416 416 if m[start:end] < s:
417 417 # we know that after the null there are 40 bytes of sha1
418 418 # this translates to the bisect lo = mid + 1
419 419 lo = advance(end + 40, '\n') + 1
420 420 else:
421 421 # this translates to the bisect hi = mid
422 422 hi = start
423 423 end = advance(lo, '\0')
424 424 found = m[lo:end]
425 425 if s == found:
426 426 # we know that after the null there are 40 bytes of sha1
427 427 end = advance(end + 40, '\n')
428 428 return (lo, end + 1)
429 429 else:
430 430 return (lo, lo)
431 431
def _checkforbidden(l):
    """Check filenames for illegal characters."""
    for f in l:
        # newlines and carriage returns would corrupt the manifest format
        if any(c in f for c in ('\n', '\r')):
            raise error.RevlogError(
                _("'\\n' and '\\r' disallowed in filenames: %r") % f)
438 438
439 439
# apply the changes collected during the bisect loop to our addlist
# return a delta suitable for addrevision
def _addlistdelta(addlist, x):
    # for large addlist arrays, building a new array is cheaper
    # than repeatedly modifying the existing one
    pos = 0
    newaddlist = array.array('c')

    for start, end, content in x:
        # copy the untouched span, then splice in the replacement content
        newaddlist += addlist[pos:start]
        if content:
            newaddlist += array.array('c', content)
        pos = end

    newaddlist += addlist[pos:]

    # binary delta: a (start, end, length) header before each content chunk
    deltatext = "".join(struct.pack(">lll", start, end, len(content))
                        + content for start, end, content in x)
    return deltatext, newaddlist
460 460
461 461 def _splittopdir(f):
462 462 if '/' in f:
463 463 dir, subpath = f.split('/', 1)
464 464 return dir + '/', subpath
465 465 else:
466 466 return '', f
467 467
468 468 _noop = lambda s: None
469 469
470 470 class treemanifest(object):
471 471 def __init__(self, dir='', text=''):
472 472 self._dir = dir
473 473 self._node = revlog.nullid
474 474 self._loadfunc = _noop
475 475 self._copyfunc = _noop
476 476 self._dirty = False
477 477 self._dirs = {}
478 478 # Using _lazymanifest here is a little slower than plain old dicts
479 479 self._files = {}
480 480 self._flags = {}
481 481 if text:
482 482 def readsubtree(subdir, subm):
483 483 raise AssertionError('treemanifest constructor only accepts '
484 484 'flat manifests')
485 485 self.parse(text, readsubtree)
486 486 self._dirty = True # Mark flat manifest dirty after parsing
487 487
488 488 def _subpath(self, path):
489 489 return self._dir + path
490 490
491 491 def __len__(self):
492 492 self._load()
493 493 size = len(self._files)
494 494 for m in self._dirs.values():
495 495 size += m.__len__()
496 496 return size
497 497
498 498 def _isempty(self):
499 499 self._load() # for consistency; already loaded by all callers
500 500 return (not self._files and (not self._dirs or
501 501 all(m._isempty() for m in self._dirs.values())))
502 502
503 503 def __repr__(self):
504 504 return ('<treemanifest dir=%s, node=%s, loaded=%s, dirty=%s at 0x%x>' %
505 505 (self._dir, revlog.hex(self._node),
506 506 bool(self._loadfunc is _noop),
507 507 self._dirty, id(self)))
508 508
509 509 def dir(self):
510 510 '''The directory that this tree manifest represents, including a
511 511 trailing '/'. Empty string for the repo root directory.'''
512 512 return self._dir
513 513
514 514 def node(self):
515 515 '''This node of this instance. nullid for unsaved instances. Should
516 516 be updated when the instance is read or written from a revlog.
517 517 '''
518 518 assert not self._dirty
519 519 return self._node
520 520
521 521 def setnode(self, node):
522 522 self._node = node
523 523 self._dirty = False
524 524
525 525 def iterentries(self):
526 526 self._load()
527 527 for p, n in sorted(self._dirs.items() + self._files.items()):
528 528 if p in self._files:
529 529 yield self._subpath(p), n, self._flags.get(p, '')
530 530 else:
531 531 for x in n.iterentries():
532 532 yield x
533 533
534 534 def iteritems(self):
535 535 self._load()
536 536 for p, n in sorted(self._dirs.items() + self._files.items()):
537 537 if p in self._files:
538 538 yield self._subpath(p), n
539 539 else:
540 540 for f, sn in n.iteritems():
541 541 yield f, sn
542 542
543 543 def iterkeys(self):
544 544 self._load()
545 545 for p in sorted(self._dirs.keys() + self._files.keys()):
546 546 if p in self._files:
547 547 yield self._subpath(p)
548 548 else:
549 549 for f in self._dirs[p].iterkeys():
550 550 yield f
551 551
552 552 def keys(self):
553 553 return list(self.iterkeys())
554 554
555 555 def __iter__(self):
556 556 return self.iterkeys()
557 557
558 558 def __contains__(self, f):
559 559 if f is None:
560 560 return False
561 561 self._load()
562 562 dir, subpath = _splittopdir(f)
563 563 if dir:
564 564 if dir not in self._dirs:
565 565 return False
566 566 return self._dirs[dir].__contains__(subpath)
567 567 else:
568 568 return f in self._files
569 569
570 570 def get(self, f, default=None):
571 571 self._load()
572 572 dir, subpath = _splittopdir(f)
573 573 if dir:
574 574 if dir not in self._dirs:
575 575 return default
576 576 return self._dirs[dir].get(subpath, default)
577 577 else:
578 578 return self._files.get(f, default)
579 579
580 580 def __getitem__(self, f):
581 581 self._load()
582 582 dir, subpath = _splittopdir(f)
583 583 if dir:
584 584 return self._dirs[dir].__getitem__(subpath)
585 585 else:
586 586 return self._files[f]
587 587
588 588 def flags(self, f):
589 589 self._load()
590 590 dir, subpath = _splittopdir(f)
591 591 if dir:
592 592 if dir not in self._dirs:
593 593 return ''
594 594 return self._dirs[dir].flags(subpath)
595 595 else:
596 596 if f in self._dirs:
597 597 return ''
598 598 return self._flags.get(f, '')
599 599
600 600 def find(self, f):
601 601 self._load()
602 602 dir, subpath = _splittopdir(f)
603 603 if dir:
604 604 return self._dirs[dir].find(subpath)
605 605 else:
606 606 return self._files[f], self._flags.get(f, '')
607 607
608 608 def __delitem__(self, f):
609 609 self._load()
610 610 dir, subpath = _splittopdir(f)
611 611 if dir:
612 612 self._dirs[dir].__delitem__(subpath)
613 613 # If the directory is now empty, remove it
614 614 if self._dirs[dir]._isempty():
615 615 del self._dirs[dir]
616 616 else:
617 617 del self._files[f]
618 618 if f in self._flags:
619 619 del self._flags[f]
620 620 self._dirty = True
621 621
622 622 def __setitem__(self, f, n):
623 623 assert n is not None
624 624 self._load()
625 625 dir, subpath = _splittopdir(f)
626 626 if dir:
627 627 if dir not in self._dirs:
628 628 self._dirs[dir] = treemanifest(self._subpath(dir))
629 629 self._dirs[dir].__setitem__(subpath, n)
630 630 else:
631 631 self._files[f] = n[:21] # to match manifestdict's behavior
632 632 self._dirty = True
633 633
634 634 def _load(self):
635 635 if self._loadfunc is not _noop:
636 636 lf, self._loadfunc = self._loadfunc, _noop
637 637 lf(self)
638 638 elif self._copyfunc is not _noop:
639 639 cf, self._copyfunc = self._copyfunc, _noop
640 640 cf(self)
641 641
642 642 def setflag(self, f, flags):
643 643 """Set the flags (symlink, executable) for path f."""
644 644 self._load()
645 645 dir, subpath = _splittopdir(f)
646 646 if dir:
647 647 if dir not in self._dirs:
648 648 self._dirs[dir] = treemanifest(self._subpath(dir))
649 649 self._dirs[dir].setflag(subpath, flags)
650 650 else:
651 651 self._flags[f] = flags
652 652 self._dirty = True
653 653
654 654 def copy(self):
655 655 copy = treemanifest(self._dir)
656 656 copy._node = self._node
657 657 copy._dirty = self._dirty
658 658 if self._copyfunc is _noop:
659 659 def _copyfunc(s):
660 660 self._load()
661 661 for d in self._dirs:
662 662 s._dirs[d] = self._dirs[d].copy()
663 663 s._files = dict.copy(self._files)
664 664 s._flags = dict.copy(self._flags)
665 665 if self._loadfunc is _noop:
666 666 _copyfunc(copy)
667 667 else:
668 668 copy._copyfunc = _copyfunc
669 669 else:
670 670 copy._copyfunc = self._copyfunc
671 671 return copy
672 672
673 673 def filesnotin(self, m2):
674 674 '''Set of files in this manifest that are not in the other'''
675 675 files = set()
676 676 def _filesnotin(t1, t2):
677 677 if t1._node == t2._node and not t1._dirty and not t2._dirty:
678 678 return
679 679 t1._load()
680 680 t2._load()
681 681 for d, m1 in t1._dirs.iteritems():
682 682 if d in t2._dirs:
683 683 m2 = t2._dirs[d]
684 684 _filesnotin(m1, m2)
685 685 else:
686 686 files.update(m1.iterkeys())
687 687
688 688 for fn in t1._files.iterkeys():
689 689 if fn not in t2._files:
690 690 files.add(t1._subpath(fn))
691 691
692 692 _filesnotin(self, m2)
693 693 return files
694 694
695 695 @propertycache
696 696 def _alldirs(self):
697 697 return util.dirs(self)
698 698
699 699 def dirs(self):
700 700 return self._alldirs
701 701
702 702 def hasdir(self, dir):
703 703 self._load()
704 704 topdir, subdir = _splittopdir(dir)
705 705 if topdir:
706 706 if topdir in self._dirs:
707 707 return self._dirs[topdir].hasdir(subdir)
708 708 return False
709 709 return (dir + '/') in self._dirs
710 710
711 711 def walk(self, match):
712 712 '''Generates matching file names.
713 713
714 714 Equivalent to manifest.matches(match).iterkeys(), but without creating
715 715 an entirely new manifest.
716 716
717 717 It also reports nonexistent files by marking them bad with match.bad().
718 718 '''
719 719 if match.always():
720 720 for f in iter(self):
721 721 yield f
722 722 return
723 723
724 724 fset = set(match.files())
725 725
726 726 for fn in self._walk(match):
727 727 if fn in fset:
728 728 # specified pattern is the exact name
729 729 fset.remove(fn)
730 730 yield fn
731 731
732 732 # for dirstate.walk, files=['.'] means "walk the whole tree".
733 733 # follow that here, too
734 734 fset.discard('.')
735 735
736 736 for fn in sorted(fset):
737 737 if not self.hasdir(fn):
738 738 match.bad(fn, None)
739 739
740 740 def _walk(self, match):
741 741 '''Recursively generates matching file names for walk().'''
742 742 if not match.visitdir(self._dir[:-1] or '.'):
743 743 return
744 744
745 745 # yield this dir's files and walk its submanifests
746 746 self._load()
747 747 for p in sorted(self._dirs.keys() + self._files.keys()):
748 748 if p in self._files:
749 749 fullp = self._subpath(p)
750 750 if match(fullp):
751 751 yield fullp
752 752 else:
753 753 for f in self._dirs[p]._walk(match):
754 754 yield f
755 755
756 756 def matches(self, match):
757 757 '''generate a new manifest filtered by the match argument'''
758 758 if match.always():
759 759 return self.copy()
760 760
761 761 return self._matches(match)
762 762
763 763 def _matches(self, match):
764 764 '''recursively generate a new manifest filtered by the match argument.
765 765 '''
766 766
767 767 visit = match.visitdir(self._dir[:-1] or '.')
768 768 if visit == 'all':
769 769 return self.copy()
770 770 ret = treemanifest(self._dir)
771 771 if not visit:
772 772 return ret
773 773
774 774 self._load()
775 775 for fn in self._files:
776 776 fullp = self._subpath(fn)
777 777 if not match(fullp):
778 778 continue
779 779 ret._files[fn] = self._files[fn]
780 780 if fn in self._flags:
781 781 ret._flags[fn] = self._flags[fn]
782 782
783 783 for dir, subm in self._dirs.iteritems():
784 784 m = subm._matches(match)
785 785 if not m._isempty():
786 786 ret._dirs[dir] = m
787 787
788 788 if not ret._isempty():
789 789 ret._dirty = True
790 790 return ret
791 791
    def diff(self, m2, clean=False):
        '''Finds changes between the current manifest and m2.

        Args:
          m2: the manifest to which this manifest should be compared.
          clean: if true, include files unchanged between these manifests
                 with a None value in the returned dictionary.

        The result is returned as a dict with filename as key and
        values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
        nodeid in the current/other manifest and fl1/fl2 is the flag
        in the current/other manifest. Where the file does not exist,
        the nodeid will be None and the flags will be the empty
        string.
        '''
        result = {}
        emptytree = treemanifest()
        def _diff(t1, t2):
            # Clean subtrees with the same node id cannot differ; prune.
            if t1._node == t2._node and not t1._dirty and not t2._dirty:
                return
            t1._load()
            t2._load()
            # Recurse into directories present on either side; a missing
            # side is represented by the shared empty tree.
            for d, m1 in t1._dirs.iteritems():
                m2 = t2._dirs.get(d, emptytree)
                _diff(m1, m2)

            for d, m2 in t2._dirs.iteritems():
                if d not in t1._dirs:
                    _diff(emptytree, m2)

            # Files in t1: changed, removed, or (when 'clean') unchanged.
            for fn, n1 in t1._files.iteritems():
                fl1 = t1._flags.get(fn, '')
                n2 = t2._files.get(fn, None)
                fl2 = t2._flags.get(fn, '')
                if n1 != n2 or fl1 != fl2:
                    result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
                elif clean:
                    result[t1._subpath(fn)] = None

            # Files only in t2 show up with a None nodeid on our side.
            for fn, n2 in t2._files.iteritems():
                if fn not in t1._files:
                    fl2 = t2._flags.get(fn, '')
                    result[t2._subpath(fn)] = ((None, ''), (n2, fl2))

        _diff(self, m2)
        return result
838 838
839 839 def unmodifiedsince(self, m2):
840 840 return not self._dirty and not m2._dirty and self._node == m2._node
841 841
842 842 def parse(self, text, readsubtree):
843 843 for f, n, fl in _parse(text):
844 844 if fl == 't':
845 845 f = f + '/'
846 846 self._dirs[f] = readsubtree(self._subpath(f), n)
847 847 elif '/' in f:
848 848 # This is a flat manifest, so use __setitem__ and setflag rather
849 849 # than assigning directly to _files and _flags, so we can
850 850 # assign a path in a subdirectory, and to mark dirty (compared
851 851 # to nullid).
852 852 self[f] = n
853 853 if fl:
854 854 self.setflag(f, fl)
855 855 else:
856 856 # Assigning to _files and _flags avoids marking as dirty,
857 857 # and should be a little faster.
858 858 self._files[f] = n
859 859 if fl:
860 860 self._flags[f] = fl
861 861
    def text(self, usemanifestv2=False):
        """Get the full data of this manifest as a bytestring."""
        self._load()
        # Serialization of (path, node, flags) entries is handled by the
        # module-level _text helper.
        return _text(self.iterentries(), usemanifestv2)
866 866
867 867 def dirtext(self, usemanifestv2=False):
868 868 """Get the full data of this directory as a bytestring. Make sure that
869 869 any submanifests have been written first, so their nodeids are correct.
870 870 """
871 871 self._load()
872 872 flags = self.flags
873 873 dirs = [(d[:-1], self._dirs[d]._node, 't') for d in self._dirs]
874 874 files = [(f, self._files[f], flags(f)) for f in self._files]
875 875 return _text(sorted(dirs + files), usemanifestv2)
876 876
877 877 def read(self, gettext, readsubtree):
878 878 def _load_for_read(s):
879 879 s.parse(gettext(), readsubtree)
880 880 s._dirty = False
881 881 self._loadfunc = _load_for_read
882 882
883 883 def writesubtrees(self, m1, m2, writesubtree):
884 884 self._load() # for consistency; should never have any effect here
885 885 emptytree = treemanifest()
886 886 for d, subm in self._dirs.iteritems():
887 887 subp1 = m1._dirs.get(d, emptytree)._node
888 888 subp2 = m2._dirs.get(d, emptytree)._node
889 889 if subp1 == revlog.nullid:
890 890 subp1, subp2 = subp2, subp1
891 891 writesubtree(subm, subp1, subp2)
892 892
class manifestrevlog(revlog.revlog):
    '''A revlog that stores manifest texts. This is responsible for caching the
    full-text manifest contents.
    '''
    def __init__(self, opener, indexfile):
        super(manifestrevlog, self).__init__(opener, indexfile)

        # Four revs covers normal operation (e.g. commit --amend); the
        # 'manifestcachesize' option lets heavy workflows such as rebasing
        # large stacks of commits raise the limit.
        defaultsize = 4
        opts = getattr(opener, 'options', None)
        if opts is None:
            cachesize = defaultsize
        else:
            cachesize = opts.get('manifestcachesize', defaultsize)
        self._fulltextcache = util.lrucachedict(cachesize)

    @property
    def fulltextcache(self):
        # Read-only accessor for the node -> fulltext LRU cache.
        return self._fulltextcache

    def clearcaches(self):
        super(manifestrevlog, self).clearcaches()
        self._fulltextcache.clear()
916 916
class manifestlog(object):
    """A collection class representing the collection of manifest snapshots
    referenced by commits in the repository.

    In this situation, 'manifest' refers to the abstract concept of a snapshot
    of the list of files in the given commit. Consumers of the output of this
    class do not care about the implementation details of the actual manifests
    they receive (i.e. tree or flat or lazily loaded, etc)."""
    def __init__(self, opener, oldmanifest):
        self._revlog = oldmanifest

        # Shared with oldmanifest for now; this becomes its own cache once
        # oldmanifest is no longer used.
        self._mancache = oldmanifest._mancache

        # _revlog and _oldmanifest are currently the same object; keeping
        # both lets manifestlog reach the revlog-specific apis once
        # _oldmanifest is eventually deleted.
        self._oldmanifest = oldmanifest

    def __getitem__(self, node):
        """Retrieves the manifest instance for the given node. Throws a KeyError
        if not found.
        """
        om = self._oldmanifest
        if om._treeondisk or om._treeinmem:
            # TODO: come back and support tree manifests directly
            return om.read(node)

        if node == revlog.nullid:
            return manifestdict()
        if node in self._mancache:
            cached = self._mancache[node]
            # The old manifest may put non-ctx manifests in the cache, so
            # only reuse entries that implement the full ctx api.
            if isinstance(cached, manifestctx):
                return cached

        ctx = manifestctx(self._revlog, node)
        self._mancache[node] = ctx
        return ctx
958
class manifestctx(manifestdict):
    """A class representing a single revision of a manifest, including its
    contents, its parent revs, and its linkrev.
    """
    def __init__(self, revlog, node):
        self._revlog = revlog

        self._node = node
        self.p1, self.p2 = revlog.parents(node)
        self.linkrev = revlog.linkrev(revlog.rev(node))

        # This should eventually be made lazy loaded, so consumers can access
        # the node/p1/linkrev data without having to parse the whole manifest.
        text = revlog.revision(node)
        revlog._fulltextcache[node] = array.array('c', text)
        super(manifestctx, self).__init__(text)

    def node(self):
        '''Return the manifest node id this ctx was built from.'''
        return self._node
980
class manifest(manifestrevlog):
    '''Revlog-backed manifest log with read/write caching on top.'''
    def __init__(self, opener, dir='', dirlogcache=None):
        '''The 'dir' and 'dirlogcache' arguments are for internal use by
        manifest.manifest only. External users should create a root manifest
        log with manifest.manifest(opener) and call dirlog() on it.
        '''
        # During normal operations, we expect to deal with not more than four
        # revs at a time (such as during commit --amend). When rebasing large
        # stacks of commits, the number can go up, hence the config knob below.
        cachesize = 4
        usetreemanifest = False
        usemanifestv2 = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            cachesize = opts.get('manifestcachesize', cachesize)
            usetreemanifest = opts.get('treemanifest', usetreemanifest)
            usemanifestv2 = opts.get('manifestv2', usemanifestv2)
        self._mancache = util.lrucachedict(cachesize)
        # Tree manifests are used both in memory and on disk when the
        # 'treemanifest' option is set.
        self._treeinmem = usetreemanifest
        self._treeondisk = usetreemanifest
        self._usemanifestv2 = usemanifestv2
        indexfile = "00manifest.i"
        if dir:
            # Subdirectory logs only exist for on-disk tree manifests.
            assert self._treeondisk, 'opts is %r' % opts
            if not dir.endswith('/'):
                dir = dir + '/'
            indexfile = "meta/" + dir + "00manifest.i"
        super(manifest, self).__init__(opener, indexfile)
        self._dir = dir
        # The dirlogcache is kept on the root manifest log
        if dir:
            self._dirlogcache = dirlogcache
        else:
            self._dirlogcache = {'': self}

    def _newmanifest(self, data=''):
        '''Build an empty/parsed manifest of the configured flavor.'''
        if self._treeinmem:
            return treemanifest(self._dir, data)
        return manifestdict(data)

    def dirlog(self, dir):
        '''Return the manifest log for subdirectory 'dir' ('' = root).'''
        if dir:
            assert self._treeondisk
        if dir not in self._dirlogcache:
            self._dirlogcache[dir] = manifest(self.opener, dir,
                                              self._dirlogcache)
        return self._dirlogcache[dir]

    def _slowreaddelta(self, node):
        # Fallback delta: read both full manifests and diff them (used for
        # tree manifests and manifestv2, where revdiff patching won't do).
        r0 = self.deltaparent(self.rev(node))
        m0 = self.read(self.node(r0))
        m1 = self.read(node)
        md = self._newmanifest()
        for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
            if n1:
                md[f] = n1
                if fl1:
                    md.setflag(f, fl1)
        return md

    def readdelta(self, node):
        '''Read the manifest entries changed relative to the delta parent.'''
        if self._usemanifestv2 or self._treeondisk:
            return self._slowreaddelta(node)
        # Fast path: patch an empty text with the stored revlog delta.
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return self._newmanifest(d)

    def readshallowdelta(self, node):
        '''For flat manifests, this is the same as readdelta(). For
        treemanifests, this will read the delta for this revlog's directory,
        without recursively reading subdirectory manifests. Instead, any
        subdirectory entry will be reported as it appears in the manifests, i.e.
        the subdirectory will be reported among files and distinguished only by
        its 't' flag.'''
        if not self._treeondisk:
            return self.readdelta(node)
        if self._usemanifestv2:
            raise error.Abort(
                _("readshallowdelta() not implemented for manifestv2"))
        r = self.rev(node)
        d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
        return manifestdict(d)

    def readfast(self, node):
        '''use the faster of readdelta or read

        This will return a manifest which is either only the files
        added/modified relative to p1, or all files in the
        manifest. Which one is returned depends on the codepath used
        to retrieve the data.
        '''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        # The delta form is only usable when the delta base is one of the
        # revision's actual parents.
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readdelta(node)
        return self.read(node)

    def readshallowfast(self, node):
        '''like readfast(), but calls readshallowdelta() instead of readdelta()
        '''
        r = self.rev(node)
        deltaparent = self.deltaparent(r)
        if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
            return self.readshallowdelta(node)
        return self.readshallow(node)

    def read(self, node):
        '''Return the manifest stored at 'node', using the LRU cache.'''
        if node == revlog.nullid:
            return self._newmanifest() # don't upset local cache
        if node in self._mancache:
            return self._mancache[node]
        if self._treeondisk:
            # Tree manifests load their text lazily and resolve subtrees
            # through the per-directory logs.
            def gettext():
                return self.revision(node)
            def readsubtree(dir, subm):
                return self.dirlog(dir).read(subm)
            m = self._newmanifest()
            m.read(gettext, readsubtree)
            m.setnode(node)
            arraytext = None
        else:
            text = self.revision(node)
            m = self._newmanifest(text)
            arraytext = array.array('c', text)
        self._mancache[node] = m
        self.fulltextcache[node] = arraytext
        return m

    def readshallow(self, node):
        '''Reads the manifest in this directory. When using flat manifests,
        this manifest will generally have files in subdirectories in it. Does
        not cache the manifest as the callers generally do not read the same
        version twice.'''
        return manifestdict(self.revision(node))

    def find(self, node, f):
        '''look up entry for a single file efficiently.
        return (node, flags) pair if found, (None, None) if not.'''
        m = self.read(node)
        try:
            return m.find(f)
        except KeyError:
            return None, None

    def add(self, m, transaction, link, p1, p2, added, removed):
        '''Store manifest 'm' with parents p1/p2 and return its new node.'''
        if (p1 in self.fulltextcache and not self._treeinmem
            and not self._usemanifestv2):
            # If our first parent is in the manifest cache, we can
            # compute a delta here using properties we know about the
            # manifest up-front, which may save time later for the
            # revlog layer.

            _checkforbidden(added)
            # combine the changed lists into one sorted iterator
            work = heapq.merge([(x, False) for x in added],
                               [(x, True) for x in removed])

            arraytext, deltatext = m.fastdelta(self.fulltextcache[p1], work)
            cachedelta = self.rev(p1), deltatext
            text = util.buffer(arraytext)
            n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
        else:
            # The first parent manifest isn't already loaded, so we'll
            # just encode a fulltext of the manifest and pass that
            # through to the revlog layer, and let it handle the delta
            # process.
            if self._treeondisk:
                m1 = self.read(p1)
                m2 = self.read(p2)
                n = self._addtree(m, transaction, link, m1, m2)
                arraytext = None
            else:
                text = m.text(self._usemanifestv2)
                n = self.addrevision(text, transaction, link, p1, p2)
                arraytext = array.array('c', text)

        self._mancache[n] = m
        self.fulltextcache[n] = arraytext

        return n

    def _addtree(self, m, transaction, link, m1, m2):
        # If the manifest is unchanged compared to one parent,
        # don't write a new revision
        if m.unmodifiedsince(m1) or m.unmodifiedsince(m2):
            return m.node()
        def writesubtree(subm, subp1, subp2):
            sublog = self.dirlog(subm.dir())
            sublog.add(subm, transaction, link, subp1, subp2, None, None)
        m.writesubtrees(m1, m2, writesubtree)
        text = m.dirtext(self._usemanifestv2)
        # Double-check whether contents are unchanged to one parent
        if text == m1.dirtext(self._usemanifestv2):
            n = m1.node()
        elif text == m2.dirtext(self._usemanifestv2):
            n = m2.node()
        else:
            n = self.addrevision(text, transaction, link, m1.node(), m2.node())
        # Save nodeid so parent manifest can calculate its nodeid
        m.setnode(n)
        return n

    def clearcaches(self):
        # Drop both the revlog-level caches and our own read cache and
        # per-directory log cache.
        super(manifest, self).clearcaches()
        self._mancache.clear()
        self._dirlogcache = {'': self}
General Comments 0
You need to be logged in to leave comments. Login now