treemanifest: add configuration for using treemanifest type...
Martin von Zweigbergk
r24402:c2287f20 default
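Context for the change below: the commit wires a new experimental knob into the store opener. In an hgrc it would presumably be enabled as "treemanifest = True" under the "[experimental]" section (the section and option names are taken from the configbool call in the diff). A programmatic sketch, assuming a Mercurial checkout of this era:

    from mercurial import ui as uimod, hg

    ui = uimod.ui()
    ui.setconfig('experimental', 'treemanifest', True)
    repo = hg.repository(ui, '.')  # assumes the cwd is a Mercurial repository
    # _applyrequirements copies the knob into the store opener's options:
    assert repo.svfs.options.get('usetreemanifest')
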
@@ -1,1920 +1,1923
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire-level function happier. We need to build a proper object
116 116 # from it in the local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 177 supportedformats = set(('revlogv1', 'generaldelta'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 180 openerreqs = set(('revlogv1', 'generaldelta'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in modules of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callbacks to shape the phases if no data were found.
204 204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 205 # This list is to be filled by extensions during repo setup.
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 244 requirements = set(requirements)
245 245 else:
246 246 raise error.RepoError(_("repository %s not found") % path)
247 247 elif create:
248 248 raise error.RepoError(_("repository %s already exists") % path)
249 249 else:
250 250 try:
251 251 requirements = scmutil.readrequires(self.vfs, self.supported)
252 252 except IOError, inst:
253 253 if inst.errno != errno.ENOENT:
254 254 raise
255 255 requirements = set()
256 256
257 257 self.sharedpath = self.path
258 258 try:
259 259 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 260 realpath=True)
261 261 s = vfs.base
262 262 if not vfs.exists():
263 263 raise error.RepoError(
264 264 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 265 self.sharedpath = s
266 266 except IOError, inst:
267 267 if inst.errno != errno.ENOENT:
268 268 raise
269 269
270 270 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 271 self.spath = self.store.path
272 272 self.svfs = self.store.vfs
273 273 self.sopener = self.svfs
274 274 self.sjoin = self.store.join
275 275 self.vfs.createmode = self.store.createmode
276 276 self._applyrequirements(requirements)
277 277 if create:
278 278 self._writerequirements()
279 279
280 280
281 281 self._branchcaches = {}
282 282 self._revbranchcache = None
283 283 self.filterpats = {}
284 284 self._datafilters = {}
285 285 self._transref = self._lockref = self._wlockref = None
286 286
287 287 # A cache for various files under .hg/ that tracks file changes
288 288 # (used by the filecache decorator)
289 289 #
290 290 # Maps a property name to its util.filecacheentry
291 291 self._filecache = {}
292 292
293 293 # hold sets of revisions to be filtered
294 294 # should be cleared when something might have changed the filter value:
295 295 # - new changesets,
296 296 # - phase change,
297 297 # - new obsolescence marker,
298 298 # - working directory parent change,
299 299 # - bookmark changes
300 300 self.filteredrevcache = {}
301 301
302 302 # generic mapping between names and nodes
303 303 self.names = namespaces.namespaces()
304 304
305 305 def close(self):
306 306 self._writecaches()
307 307
308 308 def _writecaches(self):
309 309 if self._revbranchcache:
310 310 self._revbranchcache.write()
311 311
312 312 def _restrictcapabilities(self, caps):
313 313 # bundle2 is not ready for prime time, drop it unless explicitly
314 314 # required by the tests (or some brave tester)
315 315 if self.ui.configbool('experimental', 'bundle2-exp', False):
316 316 caps = set(caps)
317 317 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
318 318 caps.add('bundle2-exp=' + urllib.quote(capsblob))
319 319 return caps
320 320
321 321 def _applyrequirements(self, requirements):
322 322 self.requirements = requirements
323 323 self.svfs.options = dict((r, 1) for r in requirements
324 324 if r in self.openerreqs)
325 325 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
326 326 if chunkcachesize is not None:
327 327 self.svfs.options['chunkcachesize'] = chunkcachesize
328 328 maxchainlen = self.ui.configint('format', 'maxchainlen')
329 329 if maxchainlen is not None:
330 330 self.svfs.options['maxchainlen'] = maxchainlen
331 331 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
332 332 if manifestcachesize is not None:
333 333 self.svfs.options['manifestcachesize'] = manifestcachesize
334 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
335 if usetreemanifest is not None:
336 self.svfs.options['usetreemanifest'] = usetreemanifest
334 337
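The option stored in svfs.options above is presumably read back on the manifest side, following the same pattern this file uses for other opener options (chunkcachesize, manifestcachesize). A hedged sketch of the consumer side, not the verbatim companion change:

    def _usetreemanifest(opener):
        # sketch: default to the flat manifest unless the opener carries the knob
        usetreemanifest = False
        opts = getattr(opener, 'options', None)
        if opts is not None:
            usetreemanifest = opts.get('usetreemanifest', usetreemanifest)
        return usetreemanifest
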
335 338 def _writerequirements(self):
336 339 reqfile = self.vfs("requires", "w")
337 340 for r in sorted(self.requirements):
338 341 reqfile.write("%s\n" % r)
339 342 reqfile.close()
340 343
341 344 def _checknested(self, path):
342 345 """Determine if path is a legal nested repository."""
343 346 if not path.startswith(self.root):
344 347 return False
345 348 subpath = path[len(self.root) + 1:]
346 349 normsubpath = util.pconvert(subpath)
347 350
348 351 # XXX: Checking against the current working copy is wrong in
349 352 # the sense that it can reject things like
350 353 #
351 354 # $ hg cat -r 10 sub/x.txt
352 355 #
353 356 # if sub/ is no longer a subrepository in the working copy
354 357 # parent revision.
355 358 #
356 359 # However, it can of course also allow things that would have
357 360 # been rejected before, such as the above cat command if sub/
358 361 # is a subrepository now, but was a normal directory before.
359 362 # The old path auditor would have rejected by mistake since it
360 363 # panics when it sees sub/.hg/.
361 364 #
362 365 # All in all, checking against the working copy seems sensible
363 366 # since we want to prevent access to nested repositories on
364 367 # the filesystem *now*.
365 368 ctx = self[None]
366 369 parts = util.splitpath(subpath)
367 370 while parts:
368 371 prefix = '/'.join(parts)
369 372 if prefix in ctx.substate:
370 373 if prefix == normsubpath:
371 374 return True
372 375 else:
373 376 sub = ctx.sub(prefix)
374 377 return sub.checknested(subpath[len(prefix) + 1:])
375 378 else:
376 379 parts.pop()
377 380 return False
378 381
379 382 def peer(self):
380 383 return localpeer(self) # not cached to avoid reference cycle
381 384
382 385 def unfiltered(self):
383 386 """Return unfiltered version of the repository
384 387
385 388 Intended to be overwritten by filtered repo."""
386 389 return self
387 390
388 391 def filtered(self, name):
389 392 """Return a filtered version of a repository"""
390 393 # build a new class with the mixin and the current class
391 394 # (possibly subclass of the repo)
392 395 class proxycls(repoview.repoview, self.unfiltered().__class__):
393 396 pass
394 397 return proxycls(self, name)
395 398
396 399 @repofilecache('bookmarks')
397 400 def _bookmarks(self):
398 401 return bookmarks.bmstore(self)
399 402
400 403 @repofilecache('bookmarks.current')
401 404 def _bookmarkcurrent(self):
402 405 return bookmarks.readcurrent(self)
403 406
404 407 def bookmarkheads(self, bookmark):
405 408 name = bookmark.split('@', 1)[0]
406 409 heads = []
407 410 for mark, n in self._bookmarks.iteritems():
408 411 if mark.split('@', 1)[0] == name:
409 412 heads.append(n)
410 413 return heads
411 414
412 415 @storecache('phaseroots')
413 416 def _phasecache(self):
414 417 return phases.phasecache(self, self._phasedefaults)
415 418
416 419 @storecache('obsstore')
417 420 def obsstore(self):
418 421 # read default format for new obsstore.
419 422 defaultformat = self.ui.configint('format', 'obsstore-version', None)
420 423 # rely on obsstore class default when possible.
421 424 kwargs = {}
422 425 if defaultformat is not None:
423 426 kwargs['defaultformat'] = defaultformat
424 427 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
425 428 store = obsolete.obsstore(self.svfs, readonly=readonly,
426 429 **kwargs)
427 430 if store and readonly:
428 431 # message is rare enough to not be translated
429 432 msg = 'obsolete feature not enabled but %i markers found!\n'
430 433 self.ui.warn(msg % len(list(store)))
431 434 return store
432 435
433 436 @storecache('00changelog.i')
434 437 def changelog(self):
435 438 c = changelog.changelog(self.svfs)
436 439 if 'HG_PENDING' in os.environ:
437 440 p = os.environ['HG_PENDING']
438 441 if p.startswith(self.root):
439 442 c.readpending('00changelog.i.a')
440 443 return c
441 444
442 445 @storecache('00manifest.i')
443 446 def manifest(self):
444 447 return manifest.manifest(self.svfs)
445 448
446 449 @repofilecache('dirstate')
447 450 def dirstate(self):
448 451 warned = [0]
449 452 def validate(node):
450 453 try:
451 454 self.changelog.rev(node)
452 455 return node
453 456 except error.LookupError:
454 457 if not warned[0]:
455 458 warned[0] = True
456 459 self.ui.warn(_("warning: ignoring unknown"
457 460 " working parent %s!\n") % short(node))
458 461 return nullid
459 462
460 463 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
461 464
462 465 def __getitem__(self, changeid):
463 466 if changeid is None:
464 467 return context.workingctx(self)
465 468 if isinstance(changeid, slice):
466 469 return [context.changectx(self, i)
467 470 for i in xrange(*changeid.indices(len(self)))
468 471 if i not in self.changelog.filteredrevs]
469 472 return context.changectx(self, changeid)
470 473
471 474 def __contains__(self, changeid):
472 475 try:
473 476 self[changeid]
474 477 return True
475 478 except error.RepoLookupError:
476 479 return False
477 480
478 481 def __nonzero__(self):
479 482 return True
480 483
481 484 def __len__(self):
482 485 return len(self.changelog)
483 486
484 487 def __iter__(self):
485 488 return iter(self.changelog)
486 489
487 490 def revs(self, expr, *args):
488 491 '''Return a list of revisions matching the given revset'''
489 492 expr = revset.formatspec(expr, *args)
490 493 m = revset.match(None, expr)
491 494 return m(self)
492 495
493 496 def set(self, expr, *args):
494 497 '''
495 498 Yield a context for each matching revision, after doing arg
496 499 replacement via revset.formatspec
497 500 '''
498 501 for r in self.revs(expr, *args):
499 502 yield self[r]
500 503
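A usage sketch for the lookup entry points above (revset syntax per 'hg help revsets'; repo is assumed to be an open localrepository):

    wctx = repo[None]                      # working directory context
    tipctx = repo['tip']                   # changectx from a rev/tag/node
    for rev in repo.revs('limit(all(), %d)', 10):
        pass                               # integer revision numbers
    for ctx in repo.set('heads(all())'):
        pass                               # changectx objects, lazily yielded
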
501 504 def url(self):
502 505 return 'file:' + self.root
503 506
504 507 def hook(self, name, throw=False, **args):
505 508 """Call a hook, passing this repo instance.
506 509
507 510 This is a convenience method to aid invoking hooks. Extensions likely
508 511 won't call this unless they have registered a custom hook or are
509 512 replacing code that is expected to call a hook.
510 513 """
511 514 return hook.hook(self.ui, self, name, throw, **args)
512 515
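A minimal sketch of an extension firing its own hook through this method (the hook name is illustrative; throw=True would make a failing hook abort, as the pretxnopen call further down does):

    def reposetup(ui, repo):
        # sketch: invoke a custom hook; handlers come from [hooks] in hgrc
        repo.hook('myext-setup', throw=False, source='reposetup')
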
513 516 @unfilteredmethod
514 517 def _tag(self, names, node, message, local, user, date, extra={},
515 518 editor=False):
516 519 if isinstance(names, str):
517 520 names = (names,)
518 521
519 522 branches = self.branchmap()
520 523 for name in names:
521 524 self.hook('pretag', throw=True, node=hex(node), tag=name,
522 525 local=local)
523 526 if name in branches:
524 527 self.ui.warn(_("warning: tag %s conflicts with existing"
525 528 " branch name\n") % name)
526 529
527 530 def writetags(fp, names, munge, prevtags):
528 531 fp.seek(0, 2)
529 532 if prevtags and prevtags[-1] != '\n':
530 533 fp.write('\n')
531 534 for name in names:
532 535 if munge:
533 536 m = munge(name)
534 537 else:
535 538 m = name
536 539
537 540 if (self._tagscache.tagtypes and
538 541 name in self._tagscache.tagtypes):
539 542 old = self.tags().get(name, nullid)
540 543 fp.write('%s %s\n' % (hex(old), m))
541 544 fp.write('%s %s\n' % (hex(node), m))
542 545 fp.close()
543 546
544 547 prevtags = ''
545 548 if local:
546 549 try:
547 550 fp = self.vfs('localtags', 'r+')
548 551 except IOError:
549 552 fp = self.vfs('localtags', 'a')
550 553 else:
551 554 prevtags = fp.read()
552 555
553 556 # local tags are stored in the current charset
554 557 writetags(fp, names, None, prevtags)
555 558 for name in names:
556 559 self.hook('tag', node=hex(node), tag=name, local=local)
557 560 return
558 561
559 562 try:
560 563 fp = self.wfile('.hgtags', 'rb+')
561 564 except IOError, e:
562 565 if e.errno != errno.ENOENT:
563 566 raise
564 567 fp = self.wfile('.hgtags', 'ab')
565 568 else:
566 569 prevtags = fp.read()
567 570
568 571 # committed tags are stored in UTF-8
569 572 writetags(fp, names, encoding.fromlocal, prevtags)
570 573
571 574 fp.close()
572 575
573 576 self.invalidatecaches()
574 577
575 578 if '.hgtags' not in self.dirstate:
576 579 self[None].add(['.hgtags'])
577 580
578 581 m = matchmod.exact(self.root, '', ['.hgtags'])
579 582 tagnode = self.commit(message, user, date, extra=extra, match=m,
580 583 editor=editor)
581 584
582 585 for name in names:
583 586 self.hook('tag', node=hex(node), tag=name, local=local)
584 587
585 588 return tagnode
586 589
587 590 def tag(self, names, node, message, local, user, date, editor=False):
588 591 '''tag a revision with one or more symbolic names.
589 592
590 593 names is a list of strings or, when adding a single tag, names may be a
591 594 string.
592 595
593 596 if local is True, the tags are stored in a per-repository file.
594 597 otherwise, they are stored in the .hgtags file, and a new
595 598 changeset is committed with the change.
596 599
597 600 keyword arguments:
598 601
599 602 local: whether to store tags in non-version-controlled file
600 603 (default False)
601 604
602 605 message: commit message to use if committing
603 606
604 607 user: name of user to use if committing
605 608
606 609 date: date tuple to use if committing'''
607 610
608 611 if not local:
609 612 m = matchmod.exact(self.root, '', ['.hgtags'])
610 613 if util.any(self.status(match=m, unknown=True, ignored=True)):
611 614 raise util.Abort(_('working copy of .hgtags is changed'),
612 615 hint=_('please commit .hgtags manually'))
613 616
614 617 self.tags() # instantiate the cache
615 618 self._tag(names, node, message, local, user, date, editor=editor)
616 619
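A hedged call sketch matching the signature above (local=True stores the tag in .hg/localtags, so no changeset is created and date may be None; the tag name and user string are illustrative):

    # sketch: tag the current tip locally without creating a changeset
    repo.tag(['nightly'], repo['tip'].node(), 'tag nightly build', True,
             'builder <builder@example.com>', None)
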
617 620 @filteredpropertycache
618 621 def _tagscache(self):
619 622 '''Returns a tagscache object that contains various tags related
620 623 caches.'''
621 624
622 625 # This simplifies its cache management by having one decorated
623 626 # function (this one) and the rest simply fetch things from it.
624 627 class tagscache(object):
625 628 def __init__(self):
626 629 # These two define the set of tags for this repository. tags
627 630 # maps tag name to node; tagtypes maps tag name to 'global' or
628 631 # 'local'. (Global tags are defined by .hgtags across all
629 632 # heads, and local tags are defined in .hg/localtags.)
630 633 # They constitute the in-memory cache of tags.
631 634 self.tags = self.tagtypes = None
632 635
633 636 self.nodetagscache = self.tagslist = None
634 637
635 638 cache = tagscache()
636 639 cache.tags, cache.tagtypes = self._findtags()
637 640
638 641 return cache
639 642
640 643 def tags(self):
641 644 '''return a mapping of tag to node'''
642 645 t = {}
643 646 if self.changelog.filteredrevs:
644 647 tags, tt = self._findtags()
645 648 else:
646 649 tags = self._tagscache.tags
647 650 for k, v in tags.iteritems():
648 651 try:
649 652 # ignore tags to unknown nodes
650 653 self.changelog.rev(v)
651 654 t[k] = v
652 655 except (error.LookupError, ValueError):
653 656 pass
654 657 return t
655 658
656 659 def _findtags(self):
657 660 '''Do the hard work of finding tags. Return a pair of dicts
658 661 (tags, tagtypes) where tags maps tag name to node, and tagtypes
659 662 maps tag name to a string like \'global\' or \'local\'.
660 663 Subclasses or extensions are free to add their own tags, but
661 664 should be aware that the returned dicts will be retained for the
662 665 duration of the localrepo object.'''
663 666
664 667 # XXX what tagtype should subclasses/extensions use? Currently
665 668 # mq and bookmarks add tags, but do not set the tagtype at all.
666 669 # Should each extension invent its own tag type? Should there
667 670 # be one tagtype for all such "virtual" tags? Or is the status
668 671 # quo fine?
669 672
670 673 alltags = {} # map tag name to (node, hist)
671 674 tagtypes = {}
672 675
673 676 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
674 677 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
675 678
676 679 # Build the return dicts. Have to re-encode tag names because
677 680 # the tags module always uses UTF-8 (in order not to lose info
678 681 # writing to the cache), but the rest of Mercurial wants them in
679 682 # local encoding.
680 683 tags = {}
681 684 for (name, (node, hist)) in alltags.iteritems():
682 685 if node != nullid:
683 686 tags[encoding.tolocal(name)] = node
684 687 tags['tip'] = self.changelog.tip()
685 688 tagtypes = dict([(encoding.tolocal(name), value)
686 689 for (name, value) in tagtypes.iteritems()])
687 690 return (tags, tagtypes)
688 691
689 692 def tagtype(self, tagname):
690 693 '''
691 694 return the type of the given tag. result can be:
692 695
693 696 'local' : a local tag
694 697 'global' : a global tag
695 698 None : tag does not exist
696 699 '''
697 700
698 701 return self._tagscache.tagtypes.get(tagname)
699 702
700 703 def tagslist(self):
701 704 '''return a list of tags ordered by revision'''
702 705 if not self._tagscache.tagslist:
703 706 l = []
704 707 for t, n in self.tags().iteritems():
705 708 l.append((self.changelog.rev(n), t, n))
706 709 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
707 710
708 711 return self._tagscache.tagslist
709 712
710 713 def nodetags(self, node):
711 714 '''return the tags associated with a node'''
712 715 if not self._tagscache.nodetagscache:
713 716 nodetagscache = {}
714 717 for t, n in self._tagscache.tags.iteritems():
715 718 nodetagscache.setdefault(n, []).append(t)
716 719 for tags in nodetagscache.itervalues():
717 720 tags.sort()
718 721 self._tagscache.nodetagscache = nodetagscache
719 722 return self._tagscache.nodetagscache.get(node, [])
720 723
721 724 def nodebookmarks(self, node):
722 725 marks = []
723 726 for bookmark, n in self._bookmarks.iteritems():
724 727 if n == node:
725 728 marks.append(bookmark)
726 729 return sorted(marks)
727 730
728 731 def branchmap(self):
729 732 '''returns a dictionary {branch: [branchheads]} with branchheads
730 733 ordered by increasing revision number'''
731 734 branchmap.updatecache(self)
732 735 return self._branchcaches[self.filtername]
733 736
734 737 @unfilteredmethod
735 738 def revbranchcache(self):
736 739 if not self._revbranchcache:
737 740 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
738 741 return self._revbranchcache
739 742
740 743 def branchtip(self, branch, ignoremissing=False):
741 744 '''return the tip node for a given branch
742 745
743 746 If ignoremissing is True, then this method will not raise an error.
744 747 This is helpful for callers that only expect None for a missing branch
745 748 (e.g. namespace).
746 749
747 750 '''
748 751 try:
749 752 return self.branchmap().branchtip(branch)
750 753 except KeyError:
751 754 if not ignoremissing:
752 755 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
753 756 else:
754 757 pass
755 758
756 759 def lookup(self, key):
757 760 return self[key].node()
758 761
759 762 def lookupbranch(self, key, remote=None):
760 763 repo = remote or self
761 764 if key in repo.branchmap():
762 765 return key
763 766
764 767 repo = (remote and remote.local()) and remote or self
765 768 return repo[key].branch()
766 769
767 770 def known(self, nodes):
768 771 nm = self.changelog.nodemap
769 772 pc = self._phasecache
770 773 result = []
771 774 for n in nodes:
772 775 r = nm.get(n)
773 776 resp = not (r is None or pc.phase(self, r) >= phases.secret)
774 777 result.append(resp)
775 778 return result
776 779
777 780 def local(self):
778 781 return self
779 782
780 783 def cancopy(self):
781 784 # so statichttprepo's override of local() works
782 785 if not self.local():
783 786 return False
784 787 if not self.ui.configbool('phases', 'publish', True):
785 788 return True
786 789 # if publishing we can't copy if there is filtered content
787 790 return not self.filtered('visible').changelog.filteredrevs
788 791
789 792 def shared(self):
790 793 '''the type of shared repository (None if not shared)'''
791 794 if self.sharedpath != self.path:
792 795 return 'store'
793 796 return None
794 797
795 798 def join(self, f, *insidef):
796 799 return self.vfs.join(os.path.join(f, *insidef))
797 800
798 801 def wjoin(self, f, *insidef):
799 802 return self.vfs.reljoin(self.root, f, *insidef)
800 803
801 804 def file(self, f):
802 805 if f[0] == '/':
803 806 f = f[1:]
804 807 return filelog.filelog(self.svfs, f)
805 808
806 809 def changectx(self, changeid):
807 810 return self[changeid]
808 811
809 812 def parents(self, changeid=None):
810 813 '''get list of changectxs for parents of changeid'''
811 814 return self[changeid].parents()
812 815
813 816 def setparents(self, p1, p2=nullid):
814 817 self.dirstate.beginparentchange()
815 818 copies = self.dirstate.setparents(p1, p2)
816 819 pctx = self[p1]
817 820 if copies:
818 821 # Adjust copy records; the dirstate cannot do it, as it
819 822 # requires access to the parents' manifests. Preserve them
820 823 # only for entries added to the first parent.
821 824 for f in copies:
822 825 if f not in pctx and copies[f] in pctx:
823 826 self.dirstate.copy(copies[f], f)
824 827 if p2 == nullid:
825 828 for f, s in sorted(self.dirstate.copies().items()):
826 829 if f not in pctx and s not in pctx:
827 830 self.dirstate.copy(None, f)
828 831 self.dirstate.endparentchange()
829 832
830 833 def filectx(self, path, changeid=None, fileid=None):
831 834 """changeid can be a changeset revision, node, or tag.
832 835 fileid can be a file revision or node."""
833 836 return context.filectx(self, path, changeid, fileid)
834 837
835 838 def getcwd(self):
836 839 return self.dirstate.getcwd()
837 840
838 841 def pathto(self, f, cwd=None):
839 842 return self.dirstate.pathto(f, cwd)
840 843
841 844 def wfile(self, f, mode='r'):
842 845 return self.wvfs(f, mode)
843 846
844 847 def _link(self, f):
845 848 return self.wvfs.islink(f)
846 849
847 850 def _loadfilter(self, filter):
848 851 if filter not in self.filterpats:
849 852 l = []
850 853 for pat, cmd in self.ui.configitems(filter):
851 854 if cmd == '!':
852 855 continue
853 856 mf = matchmod.match(self.root, '', [pat])
854 857 fn = None
855 858 params = cmd
856 859 for name, filterfn in self._datafilters.iteritems():
857 860 if cmd.startswith(name):
858 861 fn = filterfn
859 862 params = cmd[len(name):].lstrip()
860 863 break
861 864 if not fn:
862 865 fn = lambda s, c, **kwargs: util.filter(s, c)
863 866 # Wrap old filters not supporting keyword arguments
864 867 if not inspect.getargspec(fn)[2]:
865 868 oldfn = fn
866 869 fn = lambda s, c, **kwargs: oldfn(s, c)
867 870 l.append((mf, fn, params))
868 871 self.filterpats[filter] = l
869 872 return self.filterpats[filter]
870 873
871 874 def _filter(self, filterpats, filename, data):
872 875 for mf, fn, cmd in filterpats:
873 876 if mf(filename):
874 877 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
875 878 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
876 879 break
877 880
878 881 return data
879 882
880 883 @unfilteredpropertycache
881 884 def _encodefilterpats(self):
882 885 return self._loadfilter('encode')
883 886
884 887 @unfilteredpropertycache
885 888 def _decodefilterpats(self):
886 889 return self._loadfilter('decode')
887 890
888 891 def adddatafilter(self, name, filter):
889 892 self._datafilters[name] = filter
890 893
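A sketch of how a data filter registered here gets used: _loadfilter above pairs it with [encode]/[decode] patterns from hgrc whose command starts with the registered name (the filter name and pattern below are illustrative):

    def crlffilter(s, cmd, **kwargs):
        # sketch: normalize CRLF line endings when reading working-dir files
        return s.replace('\r\n', '\n')

    repo.adddatafilter('crlf:', crlffilter)
    # matching hgrc side (illustrative): [encode] **.txt = crlf:
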
891 894 def wread(self, filename):
892 895 if self._link(filename):
893 896 data = self.wvfs.readlink(filename)
894 897 else:
895 898 data = self.wvfs.read(filename)
896 899 return self._filter(self._encodefilterpats, filename, data)
897 900
898 901 def wwrite(self, filename, data, flags):
899 902 data = self._filter(self._decodefilterpats, filename, data)
900 903 if 'l' in flags:
901 904 self.wvfs.symlink(data, filename)
902 905 else:
903 906 self.wvfs.write(filename, data)
904 907 if 'x' in flags:
905 908 self.wvfs.setflags(filename, False, True)
906 909
907 910 def wwritedata(self, filename, data):
908 911 return self._filter(self._decodefilterpats, filename, data)
909 912
910 913 def currenttransaction(self):
911 914 """return the current transaction or None if non exists"""
912 915 if self._transref:
913 916 tr = self._transref()
914 917 else:
915 918 tr = None
916 919
917 920 if tr and tr.running():
918 921 return tr
919 922 return None
920 923
921 924 def transaction(self, desc, report=None):
922 925 if (self.ui.configbool('devel', 'all')
923 926 or self.ui.configbool('devel', 'check-locks')):
924 927 l = self._lockref and self._lockref()
925 928 if l is None or not l.held:
926 929 msg = 'transaction with no lock\n'
927 930 if self.ui.tracebackflag:
928 931 util.debugstacktrace(msg, 1)
929 932 else:
930 933 self.ui.write_err(msg)
931 934 tr = self.currenttransaction()
932 935 if tr is not None:
933 936 return tr.nest()
934 937
935 938 # abort here if the journal already exists
936 939 if self.svfs.exists("journal"):
937 940 raise error.RepoError(
938 941 _("abandoned transaction found"),
939 942 hint=_("run 'hg recover' to clean up transaction"))
940 943
941 944 self.hook('pretxnopen', throw=True, txnname=desc)
942 945
943 946 self._writejournal(desc)
944 947 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
945 948 if report:
946 949 rp = report
947 950 else:
948 951 rp = self.ui.warn
949 952 vfsmap = {'plain': self.vfs} # root of .hg/
950 953 # we must avoid cyclic reference between repo and transaction.
951 954 reporef = weakref.ref(self)
952 955 def validate(tr):
953 956 """will run pre-closing hooks"""
954 957 pending = lambda: tr.writepending() and self.root or ""
955 958 reporef().hook('pretxnclose', throw=True, pending=pending,
956 959 txnname=desc)
957 960
958 961 tr = transaction.transaction(rp, self.sopener, vfsmap,
959 962 "journal",
960 963 "undo",
961 964 aftertrans(renames),
962 965 self.store.createmode,
963 966 validator=validate)
964 967 # note: writing the fncache only during finalize means that the file is
965 968 # outdated when running hooks. As fncache is used for streaming clone,
966 969 # this is not expected to break anything that happens during the hooks.
967 970 tr.addfinalize('flush-fncache', self.store.write)
968 971 def txnclosehook(tr2):
969 972 """To be run if transaction is successful, will schedule a hook run
970 973 """
971 974 def hook():
972 975 reporef().hook('txnclose', throw=False, txnname=desc,
973 976 **tr2.hookargs)
974 977 reporef()._afterlock(hook)
975 978 tr.addfinalize('txnclose-hook', txnclosehook)
976 979 self._transref = weakref.ref(tr)
977 980 return tr
978 981
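The lock-then-transaction discipline that the devel check above enforces looks like this in a caller (a sketch of the pattern commitctx below follows; 'my-operation' is an illustrative transaction name):

    # sketch: canonical write path -- store lock first, then the transaction
    lock = repo.lock()
    try:
        tr = repo.transaction('my-operation')
        try:
            pass            # ... mutate the store through tr ...
            tr.close()      # success: run finalizers and close the journal
        finally:
            tr.release()    # no-op after close(); rolls back otherwise
    finally:
        lock.release()
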
979 982 def _journalfiles(self):
980 983 return ((self.svfs, 'journal'),
981 984 (self.vfs, 'journal.dirstate'),
982 985 (self.vfs, 'journal.branch'),
983 986 (self.vfs, 'journal.desc'),
984 987 (self.vfs, 'journal.bookmarks'),
985 988 (self.svfs, 'journal.phaseroots'))
986 989
987 990 def undofiles(self):
988 991 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
989 992
990 993 def _writejournal(self, desc):
991 994 self.vfs.write("journal.dirstate",
992 995 self.vfs.tryread("dirstate"))
993 996 self.vfs.write("journal.branch",
994 997 encoding.fromlocal(self.dirstate.branch()))
995 998 self.vfs.write("journal.desc",
996 999 "%d\n%s\n" % (len(self), desc))
997 1000 self.vfs.write("journal.bookmarks",
998 1001 self.vfs.tryread("bookmarks"))
999 1002 self.svfs.write("journal.phaseroots",
1000 1003 self.svfs.tryread("phaseroots"))
1001 1004
1002 1005 def recover(self):
1003 1006 lock = self.lock()
1004 1007 try:
1005 1008 if self.svfs.exists("journal"):
1006 1009 self.ui.status(_("rolling back interrupted transaction\n"))
1007 1010 vfsmap = {'': self.svfs,
1008 1011 'plain': self.vfs,}
1009 1012 transaction.rollback(self.svfs, vfsmap, "journal",
1010 1013 self.ui.warn)
1011 1014 self.invalidate()
1012 1015 return True
1013 1016 else:
1014 1017 self.ui.warn(_("no interrupted transaction available\n"))
1015 1018 return False
1016 1019 finally:
1017 1020 lock.release()
1018 1021
1019 1022 def rollback(self, dryrun=False, force=False):
1020 1023 wlock = lock = None
1021 1024 try:
1022 1025 wlock = self.wlock()
1023 1026 lock = self.lock()
1024 1027 if self.svfs.exists("undo"):
1025 1028 return self._rollback(dryrun, force)
1026 1029 else:
1027 1030 self.ui.warn(_("no rollback information available\n"))
1028 1031 return 1
1029 1032 finally:
1030 1033 release(lock, wlock)
1031 1034
1032 1035 @unfilteredmethod # Until we get smarter cache management
1033 1036 def _rollback(self, dryrun, force):
1034 1037 ui = self.ui
1035 1038 try:
1036 1039 args = self.vfs.read('undo.desc').splitlines()
1037 1040 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1038 1041 if len(args) >= 3:
1039 1042 detail = args[2]
1040 1043 oldtip = oldlen - 1
1041 1044
1042 1045 if detail and ui.verbose:
1043 1046 msg = (_('repository tip rolled back to revision %s'
1044 1047 ' (undo %s: %s)\n')
1045 1048 % (oldtip, desc, detail))
1046 1049 else:
1047 1050 msg = (_('repository tip rolled back to revision %s'
1048 1051 ' (undo %s)\n')
1049 1052 % (oldtip, desc))
1050 1053 except IOError:
1051 1054 msg = _('rolling back unknown transaction\n')
1052 1055 desc = None
1053 1056
1054 1057 if not force and self['.'] != self['tip'] and desc == 'commit':
1055 1058 raise util.Abort(
1056 1059 _('rollback of last commit while not checked out '
1057 1060 'may lose data'), hint=_('use -f to force'))
1058 1061
1059 1062 ui.status(msg)
1060 1063 if dryrun:
1061 1064 return 0
1062 1065
1063 1066 parents = self.dirstate.parents()
1064 1067 self.destroying()
1065 1068 vfsmap = {'plain': self.vfs, '': self.svfs}
1066 1069 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1067 1070 if self.vfs.exists('undo.bookmarks'):
1068 1071 self.vfs.rename('undo.bookmarks', 'bookmarks')
1069 1072 if self.svfs.exists('undo.phaseroots'):
1070 1073 self.svfs.rename('undo.phaseroots', 'phaseroots')
1071 1074 self.invalidate()
1072 1075
1073 1076 parentgone = (parents[0] not in self.changelog.nodemap or
1074 1077 parents[1] not in self.changelog.nodemap)
1075 1078 if parentgone:
1076 1079 self.vfs.rename('undo.dirstate', 'dirstate')
1077 1080 try:
1078 1081 branch = self.vfs.read('undo.branch')
1079 1082 self.dirstate.setbranch(encoding.tolocal(branch))
1080 1083 except IOError:
1081 1084 ui.warn(_('named branch could not be reset: '
1082 1085 'current branch is still \'%s\'\n')
1083 1086 % self.dirstate.branch())
1084 1087
1085 1088 self.dirstate.invalidate()
1086 1089 parents = tuple([p.rev() for p in self.parents()])
1087 1090 if len(parents) > 1:
1088 1091 ui.status(_('working directory now based on '
1089 1092 'revisions %d and %d\n') % parents)
1090 1093 else:
1091 1094 ui.status(_('working directory now based on '
1092 1095 'revision %d\n') % parents)
1093 1096 # TODO: if we know which new heads may result from this rollback, pass
1094 1097 # them to destroy(), which will prevent the branchhead cache from being
1095 1098 # invalidated.
1096 1099 self.destroyed()
1097 1100 return 0
1098 1101
1099 1102 def invalidatecaches(self):
1100 1103
1101 1104 if '_tagscache' in vars(self):
1102 1105 # can't use delattr on proxy
1103 1106 del self.__dict__['_tagscache']
1104 1107
1105 1108 self.unfiltered()._branchcaches.clear()
1106 1109 self.invalidatevolatilesets()
1107 1110
1108 1111 def invalidatevolatilesets(self):
1109 1112 self.filteredrevcache.clear()
1110 1113 obsolete.clearobscaches(self)
1111 1114
1112 1115 def invalidatedirstate(self):
1113 1116 '''Invalidates the dirstate, causing the next call to dirstate
1114 1117 to check if it was modified since the last time it was read,
1115 1118 rereading it if it has.
1116 1119
1117 1120 This is different from dirstate.invalidate() in that it doesn't
1118 1121 always reread the dirstate. Use dirstate.invalidate() if you want to
1119 1122 explicitly read the dirstate again (i.e. restoring it to a previous
1120 1123 known good state).'''
1121 1124 if hasunfilteredcache(self, 'dirstate'):
1122 1125 for k in self.dirstate._filecache:
1123 1126 try:
1124 1127 delattr(self.dirstate, k)
1125 1128 except AttributeError:
1126 1129 pass
1127 1130 delattr(self.unfiltered(), 'dirstate')
1128 1131
1129 1132 def invalidate(self):
1130 1133 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1131 1134 for k in self._filecache:
1132 1135 # dirstate is invalidated separately in invalidatedirstate()
1133 1136 if k == 'dirstate':
1134 1137 continue
1135 1138
1136 1139 try:
1137 1140 delattr(unfiltered, k)
1138 1141 except AttributeError:
1139 1142 pass
1140 1143 self.invalidatecaches()
1141 1144 self.store.invalidatecaches()
1142 1145
1143 1146 def invalidateall(self):
1144 1147 '''Fully invalidates both store and non-store parts, causing the
1145 1148 subsequent operation to reread any outside changes.'''
1146 1149 # extension should hook this to invalidate its caches
1147 1150 self.invalidate()
1148 1151 self.invalidatedirstate()
1149 1152
1150 1153 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1151 1154 try:
1152 1155 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1153 1156 except error.LockHeld, inst:
1154 1157 if not wait:
1155 1158 raise
1156 1159 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1157 1160 (desc, inst.locker))
1158 1161 # default to 600 seconds timeout
1159 1162 l = lockmod.lock(vfs, lockname,
1160 1163 int(self.ui.config("ui", "timeout", "600")),
1161 1164 releasefn, desc=desc)
1162 1165 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1163 1166 if acquirefn:
1164 1167 acquirefn()
1165 1168 return l
1166 1169
1167 1170 def _afterlock(self, callback):
1168 1171 """add a callback to the current repository lock.
1169 1172
1170 1173 The callback will be executed on lock release."""
1171 1174 l = self._lockref and self._lockref()
1172 1175 if l:
1173 1176 l.postrelease.append(callback)
1174 1177 else:
1175 1178 callback()
1176 1179
1177 1180 def lock(self, wait=True):
1178 1181 '''Lock the repository store (.hg/store) and return a weak reference
1179 1182 to the lock. Use this before modifying the store (e.g. committing or
1180 1183 stripping). If you are opening a transaction, get a lock as well.'''
1181 1184 l = self._lockref and self._lockref()
1182 1185 if l is not None and l.held:
1183 1186 l.lock()
1184 1187 return l
1185 1188
1186 1189 def unlock():
1187 1190 for k, ce in self._filecache.items():
1188 1191 if k == 'dirstate' or k not in self.__dict__:
1189 1192 continue
1190 1193 ce.refresh()
1191 1194
1192 1195 l = self._lock(self.svfs, "lock", wait, unlock,
1193 1196 self.invalidate, _('repository %s') % self.origroot)
1194 1197 self._lockref = weakref.ref(l)
1195 1198 return l
1196 1199
1197 1200 def wlock(self, wait=True):
1198 1201 '''Lock the non-store parts of the repository (everything under
1199 1202 .hg except .hg/store) and return a weak reference to the lock.
1200 1203 Use this before modifying files in .hg.'''
1201 1204 if (self.ui.configbool('devel', 'all')
1202 1205 or self.ui.configbool('devel', 'check-locks')):
1203 1206 l = self._lockref and self._lockref()
1204 1207 if l is not None and l.held:
1205 1208 msg = '"lock" taken before "wlock"\n'
1206 1209 if self.ui.tracebackflag:
1207 1210 util.debugstacktrace(msg, 1)
1208 1211 else:
1209 1212 self.ui.write_err(msg)
1210 1213 l = self._wlockref and self._wlockref()
1211 1214 if l is not None and l.held:
1212 1215 l.lock()
1213 1216 return l
1214 1217
1215 1218 def unlock():
1216 1219 if self.dirstate.pendingparentchange():
1217 1220 self.dirstate.invalidate()
1218 1221 else:
1219 1222 self.dirstate.write()
1220 1223
1221 1224 self._filecache['dirstate'].refresh()
1222 1225
1223 1226 l = self._lock(self.vfs, "wlock", wait, unlock,
1224 1227 self.invalidatedirstate, _('working directory of %s') %
1225 1228 self.origroot)
1226 1229 self._wlockref = weakref.ref(l)
1227 1230 return l
1228 1231
1229 1232 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1230 1233 """
1231 1234 commit an individual file as part of a larger transaction
1232 1235 """
1233 1236
1234 1237 fname = fctx.path()
1235 1238 fparent1 = manifest1.get(fname, nullid)
1236 1239 fparent2 = manifest2.get(fname, nullid)
1237 1240 if isinstance(fctx, context.filectx):
1238 1241 node = fctx.filenode()
1239 1242 if node in [fparent1, fparent2]:
1240 1243 self.ui.debug('reusing %s filelog entry\n' % fname)
1241 1244 return node
1242 1245
1243 1246 flog = self.file(fname)
1244 1247 meta = {}
1245 1248 copy = fctx.renamed()
1246 1249 if copy and copy[0] != fname:
1247 1250 # Mark the new revision of this file as a copy of another
1248 1251 # file. This copy data will effectively act as a parent
1249 1252 # of this new revision. If this is a merge, the first
1250 1253 # parent will be the nullid (meaning "look up the copy data")
1251 1254 # and the second one will be the other parent. For example:
1252 1255 #
1253 1256 # 0 --- 1 --- 3 rev1 changes file foo
1254 1257 # \ / rev2 renames foo to bar and changes it
1255 1258 # \- 2 -/ rev3 should have bar with all changes and
1256 1259 # should record that bar descends from
1257 1260 # bar in rev2 and foo in rev1
1258 1261 #
1259 1262 # this allows this merge to succeed:
1260 1263 #
1261 1264 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1262 1265 # \ / merging rev3 and rev4 should use bar@rev2
1263 1266 # \- 2 --- 4 as the merge base
1264 1267 #
1265 1268
1266 1269 cfname = copy[0]
1267 1270 crev = manifest1.get(cfname)
1268 1271 newfparent = fparent2
1269 1272
1270 1273 if manifest2: # branch merge
1271 1274 if fparent2 == nullid or crev is None: # copied on remote side
1272 1275 if cfname in manifest2:
1273 1276 crev = manifest2[cfname]
1274 1277 newfparent = fparent1
1275 1278
1276 1279 # Here, we used to search backwards through history to try to find
1277 1280 # where the file copy came from if the source of a copy was not in
1278 1281 # the parent directory. However, this doesn't actually make sense to
1279 1282 # do (what does a copy from something not in your working copy even
1280 1283 # mean?) and it causes bugs (e.g., issue4476). Instead, we will warn
1281 1284 # the user that copy information was dropped, so if they didn't
1282 1285 # expect this outcome it can be fixed, but this is the correct
1283 1286 # behavior in this circumstance.
1284 1287
1285 1288 if crev:
1286 1289 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1287 1290 meta["copy"] = cfname
1288 1291 meta["copyrev"] = hex(crev)
1289 1292 fparent1, fparent2 = nullid, newfparent
1290 1293 else:
1291 1294 self.ui.warn(_("warning: can't find ancestor for '%s' "
1292 1295 "copied from '%s'!\n") % (fname, cfname))
1293 1296
1294 1297 elif fparent1 == nullid:
1295 1298 fparent1, fparent2 = fparent2, nullid
1296 1299 elif fparent2 != nullid:
1297 1300 # is one parent an ancestor of the other?
1298 1301 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1299 1302 if fparent1 in fparentancestors:
1300 1303 fparent1, fparent2 = fparent2, nullid
1301 1304 elif fparent2 in fparentancestors:
1302 1305 fparent2 = nullid
1303 1306
1304 1307 # is the file changed?
1305 1308 text = fctx.data()
1306 1309 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1307 1310 changelist.append(fname)
1308 1311 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1309 1312 # are just the flags changed during merge?
1310 1313 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1311 1314 changelist.append(fname)
1312 1315
1313 1316 return fparent1
1314 1317
1315 1318 @unfilteredmethod
1316 1319 def commit(self, text="", user=None, date=None, match=None, force=False,
1317 1320 editor=False, extra={}):
1318 1321 """Add a new revision to current repository.
1319 1322
1320 1323 Revision information is gathered from the working directory,
1321 1324 match can be used to filter the committed files. If editor is
1322 1325 supplied, it is called to get a commit message.
1323 1326 """
1324 1327
1325 1328 def fail(f, msg):
1326 1329 raise util.Abort('%s: %s' % (f, msg))
1327 1330
1328 1331 if not match:
1329 1332 match = matchmod.always(self.root, '')
1330 1333
1331 1334 if not force:
1332 1335 vdirs = []
1333 1336 match.explicitdir = vdirs.append
1334 1337 match.bad = fail
1335 1338
1336 1339 wlock = self.wlock()
1337 1340 try:
1338 1341 wctx = self[None]
1339 1342 merge = len(wctx.parents()) > 1
1340 1343
1341 1344 if not force and merge and not match.always():
1342 1345 raise util.Abort(_('cannot partially commit a merge '
1343 1346 '(do not specify files or patterns)'))
1344 1347
1345 1348 status = self.status(match=match, clean=force)
1346 1349 if force:
1347 1350 status.modified.extend(status.clean) # mq may commit clean files
1348 1351
1349 1352 # check subrepos
1350 1353 subs = []
1351 1354 commitsubs = set()
1352 1355 newstate = wctx.substate.copy()
1353 1356 # only manage subrepos and .hgsubstate if .hgsub is present
1354 1357 if '.hgsub' in wctx:
1355 1358 # we'll decide whether to track this ourselves, thanks
1356 1359 for c in status.modified, status.added, status.removed:
1357 1360 if '.hgsubstate' in c:
1358 1361 c.remove('.hgsubstate')
1359 1362
1360 1363 # compare current state to last committed state
1361 1364 # build new substate based on last committed state
1362 1365 oldstate = wctx.p1().substate
1363 1366 for s in sorted(newstate.keys()):
1364 1367 if not match(s):
1365 1368 # ignore working copy, use old state if present
1366 1369 if s in oldstate:
1367 1370 newstate[s] = oldstate[s]
1368 1371 continue
1369 1372 if not force:
1370 1373 raise util.Abort(
1371 1374 _("commit with new subrepo %s excluded") % s)
1372 1375 if wctx.sub(s).dirty(True):
1373 1376 if not self.ui.configbool('ui', 'commitsubrepos'):
1374 1377 raise util.Abort(
1375 1378 _("uncommitted changes in subrepo %s") % s,
1376 1379 hint=_("use --subrepos for recursive commit"))
1377 1380 subs.append(s)
1378 1381 commitsubs.add(s)
1379 1382 else:
1380 1383 bs = wctx.sub(s).basestate()
1381 1384 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1382 1385 if oldstate.get(s, (None, None, None))[1] != bs:
1383 1386 subs.append(s)
1384 1387
1385 1388 # check for removed subrepos
1386 1389 for p in wctx.parents():
1387 1390 r = [s for s in p.substate if s not in newstate]
1388 1391 subs += [s for s in r if match(s)]
1389 1392 if subs:
1390 1393 if (not match('.hgsub') and
1391 1394 '.hgsub' in (wctx.modified() + wctx.added())):
1392 1395 raise util.Abort(
1393 1396 _("can't commit subrepos without .hgsub"))
1394 1397 status.modified.insert(0, '.hgsubstate')
1395 1398
1396 1399 elif '.hgsub' in status.removed:
1397 1400 # clean up .hgsubstate when .hgsub is removed
1398 1401 if ('.hgsubstate' in wctx and
1399 1402 '.hgsubstate' not in (status.modified + status.added +
1400 1403 status.removed)):
1401 1404 status.removed.insert(0, '.hgsubstate')
1402 1405
1403 1406 # make sure all explicit patterns are matched
1404 1407 if not force and match.files():
1405 1408 matched = set(status.modified + status.added + status.removed)
1406 1409
1407 1410 for f in match.files():
1408 1411 f = self.dirstate.normalize(f)
1409 1412 if f == '.' or f in matched or f in wctx.substate:
1410 1413 continue
1411 1414 if f in status.deleted:
1412 1415 fail(f, _('file not found!'))
1413 1416 if f in vdirs: # visited directory
1414 1417 d = f + '/'
1415 1418 for mf in matched:
1416 1419 if mf.startswith(d):
1417 1420 break
1418 1421 else:
1419 1422 fail(f, _("no match under directory!"))
1420 1423 elif f not in self.dirstate:
1421 1424 fail(f, _("file not tracked!"))
1422 1425
1423 1426 cctx = context.workingcommitctx(self, status,
1424 1427 text, user, date, extra)
1425 1428
1426 1429 if (not force and not extra.get("close") and not merge
1427 1430 and not cctx.files()
1428 1431 and wctx.branch() == wctx.p1().branch()):
1429 1432 return None
1430 1433
1431 1434 if merge and cctx.deleted():
1432 1435 raise util.Abort(_("cannot commit merge with missing files"))
1433 1436
1434 1437 ms = mergemod.mergestate(self)
1435 1438 for f in status.modified:
1436 1439 if f in ms and ms[f] == 'u':
1437 1440 raise util.Abort(_('unresolved merge conflicts '
1438 1441 '(see "hg help resolve")'))
1439 1442
1440 1443 if editor:
1441 1444 cctx._text = editor(self, cctx, subs)
1442 1445 edited = (text != cctx._text)
1443 1446
1444 1447 # Save commit message in case this transaction gets rolled back
1445 1448 # (e.g. by a pretxncommit hook). Leave the content alone on
1446 1449 # the assumption that the user will use the same editor again.
1447 1450 msgfn = self.savecommitmessage(cctx._text)
1448 1451
1449 1452 # commit subs and write new state
1450 1453 if subs:
1451 1454 for s in sorted(commitsubs):
1452 1455 sub = wctx.sub(s)
1453 1456 self.ui.status(_('committing subrepository %s\n') %
1454 1457 subrepo.subrelpath(sub))
1455 1458 sr = sub.commit(cctx._text, user, date)
1456 1459 newstate[s] = (newstate[s][0], sr)
1457 1460 subrepo.writestate(self, newstate)
1458 1461
1459 1462 p1, p2 = self.dirstate.parents()
1460 1463 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1461 1464 try:
1462 1465 self.hook("precommit", throw=True, parent1=hookp1,
1463 1466 parent2=hookp2)
1464 1467 ret = self.commitctx(cctx, True)
1465 1468 except: # re-raises
1466 1469 if edited:
1467 1470 self.ui.write(
1468 1471 _('note: commit message saved in %s\n') % msgfn)
1469 1472 raise
1470 1473
1471 1474 # update bookmarks, dirstate and mergestate
1472 1475 bookmarks.update(self, [p1, p2], ret)
1473 1476 cctx.markcommitted(ret)
1474 1477 ms.reset()
1475 1478 finally:
1476 1479 wlock.release()
1477 1480
1478 1481 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1479 1482 # hack for commands that use a temporary commit (e.g. histedit):
1480 1483 # the temporary commit may have been stripped before the hook runs
1481 1484 if node in self:
1482 1485 self.hook("commit", node=node, parent1=parent1,
1483 1486 parent2=parent2)
1484 1487 self._afterlock(commithook)
1485 1488 return ret
1486 1489
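A call sketch for the commit entry point above (keyword names per the signature; the user string is illustrative):

    # sketch: commit everything modified in the working directory
    newnode = repo.commit(text='automated checkpoint',
                          user='bot <bot@example.com>')
    if newnode is None:
        pass  # nothing changed; commit() returned None as documented above
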
1487 1490 @unfilteredmethod
1488 1491 def commitctx(self, ctx, error=False):
1489 1492 """Add a new revision to current repository.
1490 1493 Revision information is passed via the context argument.
1491 1494 """
1492 1495
1493 1496 tr = None
1494 1497 p1, p2 = ctx.p1(), ctx.p2()
1495 1498 user = ctx.user()
1496 1499
1497 1500 lock = self.lock()
1498 1501 try:
1499 1502 tr = self.transaction("commit")
1500 1503 trp = weakref.proxy(tr)
1501 1504
1502 1505 if ctx.files():
1503 1506 m1 = p1.manifest()
1504 1507 m2 = p2.manifest()
1505 1508 m = m1.copy()
1506 1509
1507 1510 # check in files
1508 1511 added = []
1509 1512 changed = []
1510 1513 removed = list(ctx.removed())
1511 1514 linkrev = len(self)
1512 1515 self.ui.note(_("committing files:\n"))
1513 1516 for f in sorted(ctx.modified() + ctx.added()):
1514 1517 self.ui.note(f + "\n")
1515 1518 try:
1516 1519 fctx = ctx[f]
1517 1520 if fctx is None:
1518 1521 removed.append(f)
1519 1522 else:
1520 1523 added.append(f)
1521 1524 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1522 1525 trp, changed)
1523 1526 m.setflag(f, fctx.flags())
1524 1527 except OSError, inst:
1525 1528 self.ui.warn(_("trouble committing %s!\n") % f)
1526 1529 raise
1527 1530 except IOError, inst:
1528 1531 errcode = getattr(inst, 'errno', errno.ENOENT)
1529 1532 if error or errcode and errcode != errno.ENOENT:
1530 1533 self.ui.warn(_("trouble committing %s!\n") % f)
1531 1534 raise
1532 1535
1533 1536 # update manifest
1534 1537 self.ui.note(_("committing manifest\n"))
1535 1538 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1536 1539 drop = [f for f in removed if f in m]
1537 1540 for f in drop:
1538 1541 del m[f]
1539 1542 mn = self.manifest.add(m, trp, linkrev,
1540 1543 p1.manifestnode(), p2.manifestnode(),
1541 1544 added, drop)
1542 1545 files = changed + removed
1543 1546 else:
1544 1547 mn = p1.manifestnode()
1545 1548 files = []
1546 1549
1547 1550 # update changelog
1548 1551 self.ui.note(_("committing changelog\n"))
1549 1552 self.changelog.delayupdate(tr)
1550 1553 n = self.changelog.add(mn, files, ctx.description(),
1551 1554 trp, p1.node(), p2.node(),
1552 1555 user, ctx.date(), ctx.extra().copy())
1553 1556 p = lambda: tr.writepending() and self.root or ""
1554 1557 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1555 1558 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1556 1559 parent2=xp2, pending=p)
1557 1560 # set the new commit in its proper phase
1558 1561 targetphase = subrepo.newcommitphase(self.ui, ctx)
1559 1562 if targetphase:
1560 1563 # retracting the boundary does not alter parent changesets.
1561 1564 # if a parent has a higher phase, the resulting phase will
1562 1565 # be compliant anyway
1563 1566 #
1564 1567 # if minimal phase was 0 we don't need to retract anything
1565 1568 phases.retractboundary(self, tr, targetphase, [n])
1566 1569 tr.close()
1567 1570 branchmap.updatecache(self.filtered('served'))
1568 1571 return n
1569 1572 finally:
1570 1573 if tr:
1571 1574 tr.release()
1572 1575 lock.release()
1573 1576
1574 1577 @unfilteredmethod
1575 1578 def destroying(self):
1576 1579 '''Inform the repository that nodes are about to be destroyed.
1577 1580 Intended for use by strip and rollback, so there's a common
1578 1581 place for anything that has to be done before destroying history.
1579 1582
1580 1583 This is mostly useful for saving state that is in memory and waiting
1581 1584 to be flushed when the current lock is released. Because a call to
1582 1585 destroyed is imminent, the repo will be invalidated causing those
1583 1586 changes to stay in memory (waiting for the next unlock), or vanish
1584 1587 completely.
1585 1588 '''
1586 1589 # When using the same lock to commit and strip, the phasecache is left
1587 1590 # dirty after committing. Then when we strip, the repo is invalidated,
1588 1591 # causing those changes to disappear.
1589 1592 if '_phasecache' in vars(self):
1590 1593 self._phasecache.write()
1591 1594
1592 1595 @unfilteredmethod
1593 1596 def destroyed(self):
1594 1597 '''Inform the repository that nodes have been destroyed.
1595 1598 Intended for use by strip and rollback, so there's a common
1596 1599 place for anything that has to be done after destroying history.
1597 1600 '''
1598 1601 # When one tries to:
1599 1602 # 1) destroy nodes thus calling this method (e.g. strip)
1600 1603 # 2) use phasecache somewhere (e.g. commit)
1601 1604 #
1602 1605 # then 2) will fail because the phasecache contains nodes that were
1603 1606 # removed. We can either remove phasecache from the filecache,
1604 1607 # causing it to reload next time it is accessed, or simply filter
1605 1608 # the removed nodes now and write the updated cache.
1606 1609 self._phasecache.filterunknown(self)
1607 1610 self._phasecache.write()
1608 1611
1609 1612 # update the 'served' branch cache to help read-only server processes
1610 1613 # Thanks to branchcache collaboration, this is done from the nearest
1611 1614 # filtered subset and is expected to be fast.
1612 1615 branchmap.updatecache(self.filtered('served'))
1613 1616
1614 1617 # Ensure the persistent tag cache is updated. Doing it now
1615 1618 # means that the tag cache only has to worry about destroyed
1616 1619 # heads immediately after a strip/rollback. That in turn
1617 1620 # guarantees that "cachetip == currenttip" (comparing both rev
1618 1621 # and node) always means no nodes have been added or destroyed.
1619 1622
1620 1623 # XXX this is suboptimal when qrefresh'ing: we strip the current
1621 1624 # head, refresh the tag cache, then immediately add a new head.
1622 1625 # But I think doing it this way is necessary for the "instant
1623 1626 # tag cache retrieval" case to work.
1624 1627 self.invalidate()
1625 1628
1626 1629 def walk(self, match, node=None):
1627 1630 '''
1628 1631 walk recursively through the directory tree or a given
1629 1632 changeset, finding all files matched by the match
1630 1633 function
1631 1634 '''
1632 1635 return self[node].walk(match)
1633 1636
1634 1637 def status(self, node1='.', node2=None, match=None,
1635 1638 ignored=False, clean=False, unknown=False,
1636 1639 listsubrepos=False):
1637 1640 '''a convenience method that calls node1.status(node2)'''
1638 1641 return self[node1].status(node2, match, ignored, clean, unknown,
1639 1642 listsubrepos)
1640 1643
1641 1644 def heads(self, start=None):
1642 1645 heads = self.changelog.heads(start)
1643 1646 # sort the output in rev descending order
1644 1647 return sorted(heads, key=self.changelog.rev, reverse=True)
1645 1648
1646 1649 def branchheads(self, branch=None, start=None, closed=False):
1647 1650 '''return a (possibly filtered) list of heads for the given branch
1648 1651
1649 1652 Heads are returned in topological order, from newest to oldest.
1650 1653 If branch is None, use the dirstate branch.
1651 1654 If start is not None, return only heads reachable from start.
1652 1655 If closed is True, return heads that are marked as closed as well.
1653 1656 '''
1654 1657 if branch is None:
1655 1658 branch = self[None].branch()
1656 1659 branches = self.branchmap()
1657 1660 if branch not in branches:
1658 1661 return []
1659 1662 # the cache returns heads ordered lowest to highest
1660 1663 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1661 1664 if start is not None:
1662 1665 # filter out the heads that cannot be reached from startrev
1663 1666 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1664 1667 bheads = [h for h in bheads if h in fbheads]
1665 1668 return bheads
1666 1669
1667 1670 def branches(self, nodes):
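# For each starting node, follow first parents until a merge or a
# root is reached, and report (start, end, p1, p2) for that linear
# run (used by the old wire protocol).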
1668 1671 if not nodes:
1669 1672 nodes = [self.changelog.tip()]
1670 1673 b = []
1671 1674 for n in nodes:
1672 1675 t = n
1673 1676 while True:
1674 1677 p = self.changelog.parents(n)
1675 1678 if p[1] != nullid or p[0] == nullid:
1676 1679 b.append((t, n, p[0], p[1]))
1677 1680 break
1678 1681 n = p[0]
1679 1682 return b
1680 1683
1681 1684 def between(self, pairs):
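# For each (top, bottom) pair, collect the nodes lying 1, 2, 4, 8,
# ... first-parent steps below top, stopping at bottom or the null
# revision; the exponential spacing keeps the answer small (used by
# the old discovery protocol).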
1682 1685 r = []
1683 1686
1684 1687 for top, bottom in pairs:
1685 1688 n, l, i = top, [], 0
1686 1689 f = 1
1687 1690
1688 1691 while n != bottom and n != nullid:
1689 1692 p = self.changelog.parents(n)[0]
1690 1693 if i == f:
1691 1694 l.append(n)
1692 1695 f = f * 2
1693 1696 n = p
1694 1697 i += 1
1695 1698
1696 1699 r.append(l)
1697 1700
1698 1701 return r
1699 1702
1700 1703 def checkpush(self, pushop):
1701 1704 """Extensions can override this function if additional checks have
1702 1705 to be performed before pushing, or call it if they override push
1703 1706 command.
1704 1707 """
1705 1708 pass
1706 1709
1707 1710 @unfilteredpropertycache
1708 1711 def prepushoutgoinghooks(self):
1709 1712 """Return a util.hooks object holding "(repo, remote, outgoing)"
1710 1713 functions, which are called before pushing changesets.
1711 1714 """
1712 1715 return util.hooks()
1713 1716
1714 1717 def stream_in(self, remote, requirements):
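# The streamed data, as parsed below: a status line (0 = ok,
# 1 = operation forbidden, 2 = remote lock failed), then a
# "<total_files> <total_bytes>" line, then for each file a
# "<store path>\0<size>" line followed by <size> bytes of raw
# revlog data.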
1715 1718 lock = self.lock()
1716 1719 try:
1717 1720 # Save remote branchmap. We will use it later
1718 1721 # to speed up branchcache creation
1719 1722 rbranchmap = None
1720 1723 if remote.capable("branchmap"):
1721 1724 rbranchmap = remote.branchmap()
1722 1725
1723 1726 fp = remote.stream_out()
1724 1727 l = fp.readline()
1725 1728 try:
1726 1729 resp = int(l)
1727 1730 except ValueError:
1728 1731 raise error.ResponseError(
1729 1732 _('unexpected response from remote server:'), l)
1730 1733 if resp == 1:
1731 1734 raise util.Abort(_('operation forbidden by server'))
1732 1735 elif resp == 2:
1733 1736 raise util.Abort(_('locking the remote repository failed'))
1734 1737 elif resp != 0:
1735 1738 raise util.Abort(_('the server sent an unknown error code'))
1736 1739 self.ui.status(_('streaming all changes\n'))
1737 1740 l = fp.readline()
1738 1741 try:
1739 1742 total_files, total_bytes = map(int, l.split(' ', 1))
1740 1743 except (ValueError, TypeError):
1741 1744 raise error.ResponseError(
1742 1745 _('unexpected response from remote server:'), l)
1743 1746 self.ui.status(_('%d files to transfer, %s of data\n') %
1744 1747 (total_files, util.bytecount(total_bytes)))
1745 1748 handled_bytes = 0
1746 1749 self.ui.progress(_('clone'), 0, total=total_bytes)
1747 1750 start = time.time()
1748 1751
1749 1752 tr = self.transaction(_('clone'))
1750 1753 try:
1751 1754 for i in xrange(total_files):
1752 1755 # XXX doesn't support '\n' or '\r' in filenames
1753 1756 l = fp.readline()
1754 1757 try:
1755 1758 name, size = l.split('\0', 1)
1756 1759 size = int(size)
1757 1760 except (ValueError, TypeError):
1758 1761 raise error.ResponseError(
1759 1762 _('unexpected response from remote server:'), l)
1760 1763 if self.ui.debugflag:
1761 1764 self.ui.debug('adding %s (%s)\n' %
1762 1765 (name, util.bytecount(size)))
1763 1766 # for backwards compat, name was partially encoded
1764 1767 ofp = self.svfs(store.decodedir(name), 'w')
1765 1768 for chunk in util.filechunkiter(fp, limit=size):
1766 1769 handled_bytes += len(chunk)
1767 1770 self.ui.progress(_('clone'), handled_bytes,
1768 1771 total=total_bytes)
1769 1772 ofp.write(chunk)
1770 1773 ofp.close()
1771 1774 tr.close()
1772 1775 finally:
1773 1776 tr.release()
1774 1777
1775 1778 # Writing straight to files circumvented the in-memory caches
1776 1779 self.invalidate()
1777 1780
1778 1781 elapsed = time.time() - start
1779 1782 if elapsed <= 0:
1780 1783 elapsed = 0.001
1781 1784 self.ui.progress(_('clone'), None)
1782 1785 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1783 1786 (util.bytecount(total_bytes), elapsed,
1784 1787 util.bytecount(total_bytes / elapsed)))
1785 1788
1786 1789 # new requirements = old non-format requirements +
1787 1790 # new format-related requirements
1788 1791 # from the streamed-in repository
1789 1792 requirements.update(set(self.requirements) - self.supportedformats)
1790 1793 self._applyrequirements(requirements)
1791 1794 self._writerequirements()
1792 1795
1793 1796 if rbranchmap:
1794 1797 rbheads = []
1795 1798 closed = []
1796 1799 for bheads in rbranchmap.itervalues():
1797 1800 rbheads.extend(bheads)
1798 1801 for h in bheads:
1799 1802 r = self.changelog.rev(h)
1800 1803 b, c = self.changelog.branchinfo(r)
1801 1804 if c:
1802 1805 closed.append(h)
1803 1806
1804 1807 if rbheads:
1805 1808 rtiprev = max((int(self.changelog.rev(node))
1806 1809 for node in rbheads))
1807 1810 cache = branchmap.branchcache(rbranchmap,
1808 1811 self[rtiprev].node(),
1809 1812 rtiprev,
1810 1813 closednodes=closed)
1811 1814 # Try to stick it as low as possible
1812 1815 # filters above 'served' are unlikely to be fetched from a clone
1813 1816 for candidate in ('base', 'immutable', 'served'):
1814 1817 rview = self.filtered(candidate)
1815 1818 if cache.validfor(rview):
1816 1819 self._branchcaches[candidate] = cache
1817 1820 cache.write(rview)
1818 1821 break
1819 1822 self.invalidate()
1820 1823 return len(self.heads()) + 1
1821 1824 finally:
1822 1825 lock.release()
1823 1826
1824 1827 def clone(self, remote, heads=[], stream=None):
1825 1828 '''clone remote repository.
1826 1829
1827 1830 keyword arguments:
1828 1831 heads: list of revs to clone (forces use of pull)
1829 1832 stream: use streaming clone if possible'''
1830 1833
1831 1834 # now, all clients that can request uncompressed clones can
1832 1835 # read repo formats supported by all servers that can serve
1833 1836 # them.
1834 1837
1835 1838 # if revlog format changes, client will have to check version
1836 1839 # and format flags on "stream" capability, and use
1837 1840 # uncompressed only if compatible.
1838 1841
1839 1842 if stream is None:
1840 1843 # if the server explicitly prefers to stream (for fast LANs)
1841 1844 stream = remote.capable('stream-preferred')
1842 1845
1843 1846 if stream and not heads:
1844 1847 # 'stream' means remote revlog format is revlogv1 only
1845 1848 if remote.capable('stream'):
1846 1849 self.stream_in(remote, set(('revlogv1',)))
1847 1850 else:
1848 1851 # otherwise, 'streamreqs' contains the remote revlog format
1849 1852 streamreqs = remote.capable('streamreqs')
1850 1853 if streamreqs:
1851 1854 streamreqs = set(streamreqs.split(','))
1852 1855 # if we support it, stream in and adjust our requirements
1853 1856 if not streamreqs - self.supportedformats:
1854 1857 self.stream_in(remote, streamreqs)
1855 1858
1856 1859 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1857 1860 try:
1858 1861 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1859 1862 ret = exchange.pull(self, remote, heads).cgresult
1860 1863 finally:
1861 1864 self.ui.restoreconfig(quiet)
1862 1865 return ret
1863 1866
1864 1867 def pushkey(self, namespace, key, old, new):
1865 1868 try:
1866 1869 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1867 1870 old=old, new=new)
1868 1871 except error.HookAbort, exc:
1869 1872 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1870 1873 if exc.hint:
1871 1874 self.ui.write_err(_("(%s)\n") % exc.hint)
1872 1875 return False
1873 1876 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1874 1877 ret = pushkey.push(self, namespace, key, old, new)
1875 1878 def runhook():
1876 1879 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1877 1880 ret=ret)
1878 1881 self._afterlock(runhook)
1879 1882 return ret
1880 1883
1881 1884 def listkeys(self, namespace):
1882 1885 self.hook('prelistkeys', throw=True, namespace=namespace)
1883 1886 self.ui.debug('listing keys for "%s"\n' % namespace)
1884 1887 values = pushkey.list(self, namespace)
1885 1888 self.hook('listkeys', namespace=namespace, values=values)
1886 1889 return values
1887 1890
1888 1891 def debugwireargs(self, one, two, three=None, four=None, five=None):
1889 1892 '''used to test argument passing over the wire'''
1890 1893 return "%s %s %s %s %s" % (one, two, three, four, five)
1891 1894
1892 1895 def savecommitmessage(self, text):
1893 1896 fp = self.vfs('last-message.txt', 'wb')
1894 1897 try:
1895 1898 fp.write(text)
1896 1899 finally:
1897 1900 fp.close()
1898 1901 return self.pathto(fp.name[len(self.root) + 1:])
1899 1902
1900 1903 # used to avoid circular references so destructors work
1901 1904 def aftertrans(files):
1902 1905 renamefiles = [tuple(t) for t in files]
1903 1906 def a():
1904 1907 for vfs, src, dest in renamefiles:
1905 1908 try:
1906 1909 vfs.rename(src, dest)
1907 1910 except OSError: # journal file does not yet exist
1908 1911 pass
1909 1912 return a
1910 1913
1911 1914 def undoname(fn):
1912 1915 base, name = os.path.split(fn)
1913 1916 assert name.startswith('journal')
1914 1917 return os.path.join(base, name.replace('journal', 'undo', 1))
1915 1918
1916 1919 def instance(ui, path, create):
1917 1920 return localrepository(ui, util.urllocalpath(path), create)
1918 1921
1919 1922 def islocal(path):
1920 1923 return True
@@ -1,628 +1,636
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util, scmutil
10 10 import array, struct
11 11
12 12 propertycache = util.propertycache
13 13
14 14 class _lazymanifest(dict):
15 15 """This is the pure implementation of lazymanifest.
16 16
17 17 It has not been optimized *at all* and is not lazy.
18 18 """
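# Each manifest line has the form "<path>\0<40-hex node><flags>\n",
# where <flags> is empty or a one-letter flag such as 'l' (symlink)
# or 'x' (executable); the parser below splits on the NUL and treats
# anything past the 40 hex digits as flags.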
19 19
20 20 def __init__(self, data):
21 21 # This init method does a little bit of excessive-looking
22 22 # precondition checking. This is so that the behavior of this
23 23 # class exactly matches its C counterpart to try and help
24 24 # prevent surprise breakage for anyone that develops against
25 25 # the pure version.
26 26 if data and data[-1] != '\n':
27 27 raise ValueError('Manifest did not end in a newline.')
28 28 dict.__init__(self)
29 29 prev = None
30 30 for l in data.splitlines():
31 31 if prev is not None and prev > l:
32 32 raise ValueError('Manifest lines not in sorted order.')
33 33 prev = l
34 34 f, n = l.split('\0')
35 35 if len(n) > 40:
36 36 self[f] = revlog.bin(n[:40]), n[40:]
37 37 else:
38 38 self[f] = revlog.bin(n), ''
39 39
40 40 def __setitem__(self, k, v):
41 41 node, flag = v
42 42 assert node is not None
43 43 if len(node) > 21:
44 44 node = node[:21] # match c implementation behavior
45 45 dict.__setitem__(self, k, (node, flag))
46 46
47 47 def __iter__(self):
48 48 return iter(sorted(dict.keys(self)))
49 49
50 50 def iterkeys(self):
51 51 return iter(sorted(dict.keys(self)))
52 52
53 53 def iterentries(self):
54 54 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
55 55
56 56 def copy(self):
57 57 c = _lazymanifest('')
58 58 c.update(self)
59 59 return c
60 60
61 61 def diff(self, m2, clean=False):
62 62 '''Finds changes between the current manifest and m2.'''
63 63 diff = {}
64 64
65 65 for fn, e1 in self.iteritems():
66 66 if fn not in m2:
67 67 diff[fn] = e1, (None, '')
68 68 else:
69 69 e2 = m2[fn]
70 70 if e1 != e2:
71 71 diff[fn] = e1, e2
72 72 elif clean:
73 73 diff[fn] = None
74 74
75 75 for fn, e2 in m2.iteritems():
76 76 if fn not in self:
77 77 diff[fn] = (None, ''), e2
78 78
79 79 return diff
80 80
81 81 def filtercopy(self, filterfn):
82 82 c = _lazymanifest('')
83 83 for f, n, fl in self.iterentries():
84 84 if filterfn(f):
85 85 c[f] = n, fl
86 86 return c
87 87
88 88 def text(self):
89 89 """Get the full data of this manifest as a bytestring."""
90 90 fl = sorted(self.iterentries())
91 91
92 92 _hex = revlog.hex
93 93 # if this is changed to support newlines in filenames,
94 94 # be sure to check the templates/ dir again (especially *-raw.tmpl)
95 95 return ''.join("%s\0%s%s\n" % (
96 96 f, _hex(n[:20]), flag) for f, n, flag in fl)
97 97
98 98 try:
99 99 _lazymanifest = parsers.lazymanifest
100 100 except AttributeError:
101 101 pass
102 102
103 103 class manifestdict(object):
104 104 def __init__(self, data=''):
105 105 self._lm = _lazymanifest(data)
106 106
107 107 def __getitem__(self, key):
108 108 return self._lm[key][0]
109 109
110 110 def find(self, key):
111 111 return self._lm[key]
112 112
113 113 def __len__(self):
114 114 return len(self._lm)
115 115
116 116 def __setitem__(self, key, node):
117 117 self._lm[key] = node, self.flags(key, '')
118 118
119 119 def __contains__(self, key):
120 120 return key in self._lm
121 121
122 122 def __delitem__(self, key):
123 123 del self._lm[key]
124 124
125 125 def __iter__(self):
126 126 return self._lm.__iter__()
127 127
128 128 def iterkeys(self):
129 129 return self._lm.iterkeys()
130 130
131 131 def keys(self):
132 132 return list(self.iterkeys())
133 133
134 134 def intersectfiles(self, files):
135 135 '''make a new lazymanifest with the intersection of self with files
136 136
137 137 The algorithm assumes that files is much smaller than self.'''
138 138 ret = manifestdict()
139 139 lm = self._lm
140 140 for fn in files:
141 141 if fn in lm:
142 142 ret._lm[fn] = self._lm[fn]
143 143 return ret
144 144
145 145 def filesnotin(self, m2):
146 146 '''Set of files in this manifest that are not in the other'''
147 147 files = set(self)
148 148 files.difference_update(m2)
149 149 return files
150 150
151 151 @propertycache
152 152 def _dirs(self):
153 153 return scmutil.dirs(self)
154 154
155 155 def dirs(self):
156 156 return self._dirs
157 157
158 158 def hasdir(self, dir):
159 159 return dir in self._dirs
160 160
161 161 def matches(self, match):
162 162 '''generate a new manifest filtered by the match argument'''
163 163 if match.always():
164 164 return self.copy()
165 165
166 166 files = match.files()
167 167 if (len(files) < 100 and (match.matchfn == match.exact or
168 168 (not match.anypats() and util.all(fn in self for fn in files)))):
169 169 return self.intersectfiles(files)
170 170
171 171 lm = manifestdict('')
172 172 lm._lm = self._lm.filtercopy(match)
173 173 return lm
174 174
175 175 def diff(self, m2, clean=False):
176 176 '''Finds changes between the current manifest and m2.
177 177
178 178 Args:
179 179 m2: the manifest to which this manifest should be compared.
180 180 clean: if true, include files unchanged between these manifests
181 181 with a None value in the returned dictionary.
182 182
183 183 The result is returned as a dict with filename as key and
184 184 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
185 185 nodeid in the current/other manifest and fl1/fl2 is the flag
186 186 in the current/other manifest. Where the file does not exist,
187 187 the nodeid will be None and the flags will be the empty
188 188 string.
189 189 '''
190 190 return self._lm.diff(m2._lm, clean)
191 191
192 192 def setflag(self, key, flag):
193 193 self._lm[key] = self[key], flag
194 194
195 195 def get(self, key, default=None):
196 196 try:
197 197 return self._lm[key][0]
198 198 except KeyError:
199 199 return default
200 200
201 201 def flags(self, key, default=''):
202 202 try:
203 203 return self._lm[key][1]
204 204 except KeyError:
205 205 return default
206 206
207 207 def copy(self):
208 208 c = manifestdict('')
209 209 c._lm = self._lm.copy()
210 210 return c
211 211
212 212 def iteritems(self):
213 213 return (x[:2] for x in self._lm.iterentries())
214 214
215 215 def text(self):
216 216 return self._lm.text()
217 217
218 218 def fastdelta(self, base, changes):
219 219 """Given a base manifest text as an array.array and a list of changes
220 220 relative to that text, compute a delta that can be used by revlog.
221 221 """
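# changes holds (path, todelete) pairs sorted by path; each delta
# hunk built below is [start, end, replacement text] against the base
# text, and _addlistdelta packs the hunks into revlog's ">lll" +
# content binary delta format.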
222 222 delta = []
223 223 dstart = None
224 224 dend = None
225 225 dline = [""]
226 226 start = 0
227 227 # zero copy representation of base as a buffer
228 228 addbuf = util.buffer(base)
229 229
230 230 # start with a readonly loop that finds the offset of
231 231 # each line and creates the deltas
232 232 for f, todelete in changes:
233 233 # bs will either be the index of the item or the insert point
234 234 start, end = _msearch(addbuf, f, start)
235 235 if not todelete:
236 236 h, fl = self._lm[f]
237 237 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
238 238 else:
239 239 if start == end:
240 240 # item we want to delete was not found, error out
241 241 raise AssertionError(
242 242 _("failed to remove %s from manifest") % f)
243 243 l = ""
244 244 if dstart is not None and dstart <= start and dend >= start:
245 245 if dend < end:
246 246 dend = end
247 247 if l:
248 248 dline.append(l)
249 249 else:
250 250 if dstart is not None:
251 251 delta.append([dstart, dend, "".join(dline)])
252 252 dstart = start
253 253 dend = end
254 254 dline = [l]
255 255
256 256 if dstart is not None:
257 257 delta.append([dstart, dend, "".join(dline)])
258 258 # apply the delta to the base, and get a delta for addrevision
259 259 deltatext, arraytext = _addlistdelta(base, delta)
260 260 return arraytext, deltatext
261 261
262 262 def _msearch(m, s, lo=0, hi=None):
263 263 '''return a tuple (start, end) that says where to find s within m.
264 264
265 265 If the string is found, m[start:end] is the line containing
266 266 that string. If start == end the string was not found and
267 267 they indicate the proper sorted insertion point.
268 268
269 269 m should be a buffer or a string
270 270 s is a string'''
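# Every line is "<path>\0<40 hex chars><flags>\n", which is why the
# scans below can skip 40 bytes past the NUL before looking for the
# newline.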
271 271 def advance(i, c):
272 272 while i < lenm and m[i] != c:
273 273 i += 1
274 274 return i
275 275 if not s:
276 276 return (lo, lo)
277 277 lenm = len(m)
278 278 if not hi:
279 279 hi = lenm
280 280 while lo < hi:
281 281 mid = (lo + hi) // 2
282 282 start = mid
283 283 while start > 0 and m[start - 1] != '\n':
284 284 start -= 1
285 285 end = advance(start, '\0')
286 286 if m[start:end] < s:
287 287 # we know that after the null there are 40 bytes of sha1
288 288 # this translates to the bisect lo = mid + 1
289 289 lo = advance(end + 40, '\n') + 1
290 290 else:
291 291 # this translates to the bisect hi = mid
292 292 hi = start
293 293 end = advance(lo, '\0')
294 294 found = m[lo:end]
295 295 if s == found:
296 296 # we know that after the null there are 40 bytes of sha1
297 297 end = advance(end + 40, '\n')
298 298 return (lo, end + 1)
299 299 else:
300 300 return (lo, lo)
301 301
302 302 def _checkforbidden(l):
303 303 """Check filenames for illegal characters."""
304 304 for f in l:
305 305 if '\n' in f or '\r' in f:
306 306 raise error.RevlogError(
307 307 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
308 308
309 309
310 310 # apply the changes collected during the bisect loop to our addlist
311 311 # return a delta suitable for addrevision
312 312 def _addlistdelta(addlist, x):
313 313 # for large addlist arrays, building a new array is cheaper
314 314 # than repeatedly modifying the existing one
315 315 currentposition = 0
316 316 newaddlist = array.array('c')
317 317
318 318 for start, end, content in x:
319 319 newaddlist += addlist[currentposition:start]
320 320 if content:
321 321 newaddlist += array.array('c', content)
322 322
323 323 currentposition = end
324 324
325 325 newaddlist += addlist[currentposition:]
326 326
327 327 deltatext = "".join(struct.pack(">lll", start, end, len(content))
328 328 + content for start, end, content in x)
329 329 return deltatext, newaddlist
330 330
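# A sketch of how paths are split into a top-level directory (with a
# trailing '/') and the remainder, per the function below:
#   _splittopdir('a/b/c')  -> ('a/', 'b/c')
#   _splittopdir('README') -> ('', 'README')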
331 331 def _splittopdir(f):
332 332 if '/' in f:
333 333 dir, subpath = f.split('/', 1)
334 334 return dir + '/', subpath
335 335 else:
336 336 return '', f
337 337
338 338 class treemanifest(object):
339 339 def __init__(self, text=''):
340 340 self._dirs = {}
341 341 # Using _lazymanifest here is a little slower than plain old dicts
342 342 self._files = {}
343 343 self._flags = {}
344 344 lm = _lazymanifest(text)
345 345 for f, n, fl in lm.iterentries():
346 346 self[f] = n
347 347 if fl:
348 348 self.setflag(f, fl)
349 349
350 350 def __len__(self):
351 351 size = len(self._files)
352 352 for m in self._dirs.values():
353 353 size += m.__len__()
354 354 return size
355 355
356 356 def iteritems(self):
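# Directory keys carry a trailing '/', so one sorted() pass over dirs
# and files should yield full paths in manifest (path-sorted) order.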
357 357 for p, n in sorted(self._dirs.items() + self._files.items()):
358 358 if p in self._files:
359 359 yield p, n
360 360 else:
361 361 for sf, sn in n.iteritems():
362 362 yield p + sf, sn
363 363
364 364 def iterkeys(self):
365 365 for p in sorted(self._dirs.keys() + self._files.keys()):
366 366 if p in self._files:
367 367 yield p
368 368 else:
369 369 for f in self._dirs[p].iterkeys():
370 370 yield p + f
371 371
372 372 def keys(self):
373 373 return list(self.iterkeys())
374 374
375 375 def __iter__(self):
376 376 return self.iterkeys()
377 377
378 378 def __contains__(self, f):
379 379 if f is None:
380 380 return False
381 381 dir, subpath = _splittopdir(f)
382 382 if dir:
383 383 if dir not in self._dirs:
384 384 return False
385 385 return self._dirs[dir].__contains__(subpath)
386 386 else:
387 387 return f in self._files
388 388
389 389 def get(self, f, default=None):
390 390 dir, subpath = _splittopdir(f)
391 391 if dir:
392 392 if dir not in self._dirs:
393 393 return default
394 394 return self._dirs[dir].get(subpath, default)
395 395 else:
396 396 return self._files.get(f, default)
397 397
398 398 def __getitem__(self, f):
399 399 dir, subpath = _splittopdir(f)
400 400 if dir:
401 401 return self._dirs[dir].__getitem__(subpath)
402 402 else:
403 403 return self._files[f]
404 404
405 405 def flags(self, f):
406 406 dir, subpath = _splittopdir(f)
407 407 if dir:
408 408 if dir not in self._dirs:
409 409 return ''
410 410 return self._dirs[dir].flags(subpath)
411 411 else:
412 412 if f in self._dirs:
413 413 return ''
414 414 return self._flags.get(f, '')
415 415
416 416 def find(self, f):
417 417 dir, subpath = _splittopdir(f)
418 418 if dir:
419 419 return self._dirs[dir].find(subpath)
420 420 else:
421 421 return self._files[f], self._flags.get(f, '')
422 422
423 423 def __delitem__(self, f):
424 424 dir, subpath = _splittopdir(f)
425 425 if dir:
426 426 self._dirs[dir].__delitem__(subpath)
427 427 # If the directory is now empty, remove it
428 428 if not self._dirs[dir]._dirs and not self._dirs[dir]._files:
429 429 del self._dirs[dir]
430 430 else:
431 431 del self._files[f]
432 432 if f in self._flags:
433 433 del self._flags[f]
434 434
435 435 def __setitem__(self, f, n):
436 436 assert n is not None
437 437 dir, subpath = _splittopdir(f)
438 438 if dir:
439 439 if dir not in self._dirs:
440 440 self._dirs[dir] = treemanifest()
441 441 self._dirs[dir].__setitem__(subpath, n)
442 442 else:
443 443 self._files[f] = n
444 444
445 445 def setflag(self, f, flags):
446 446 """Set the flags (symlink, executable) for path f."""
447 447 dir, subpath = _splittopdir(f)
448 448 if dir:
449 449 if dir not in self._dirs:
450 450 self._dirs[dir] = treemanifest()
451 451 self._dirs[dir].setflag(subpath, flags)
452 452 else:
453 453 self._flags[f] = flags
454 454
455 455 def copy(self):
456 456 copy = treemanifest()
457 457 for d in self._dirs:
458 458 copy._dirs[d] = self._dirs[d].copy()
459 459 copy._files = dict.copy(self._files)
460 460 copy._flags = dict.copy(self._flags)
461 461 return copy
462 462
463 463 def intersectfiles(self, files):
464 464 '''make a new treemanifest with the intersection of self with files
465 465
466 466 The algorithm assumes that files is much smaller than self.'''
467 467 ret = treemanifest()
468 468 for fn in files:
469 469 if fn in self:
470 470 ret[fn] = self[fn]
471 471 flags = self.flags(fn)
472 472 if flags:
473 473 ret.setflag(fn, flags)
474 474 return ret
475 475
476 476 def filesnotin(self, m2):
477 477 '''Set of files in this manifest that are not in the other'''
478 478 files = set(self.iterkeys())
479 479 files.difference_update(m2.iterkeys())
480 480 return files
481 481
482 482 @propertycache
483 483 def _alldirs(self):
484 484 return scmutil.dirs(self)
485 485
486 486 def dirs(self):
487 487 return self._alldirs
488 488
489 489 def hasdir(self, dir):
490 490 return dir in self._alldirs
491 491
492 492 def matches(self, match):
493 493 '''generate a new manifest filtered by the match argument'''
494 494 if match.always():
495 495 return self.copy()
496 496
497 497 files = match.files()
498 498 if (match.matchfn == match.exact or
499 499 (not match.anypats() and util.all(fn in self for fn in files))):
500 500 return self.intersectfiles(files)
501 501
502 502 m = self.copy()
503 503 for fn in m.keys():
504 504 if not match(fn):
505 505 del m[fn]
506 506 return m
507 507
508 508 def diff(self, m2, clean=False):
509 509 '''Finds changes between the current manifest and m2.
510 510
511 511 Args:
512 512 m2: the manifest to which this manifest should be compared.
513 513 clean: if true, include files unchanged between these manifests
514 514 with a None value in the returned dictionary.
515 515
516 516 The result is returned as a dict with filename as key and
517 517 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
518 518 nodeid in the current/other manifest and fl1/fl2 is the flag
519 519 in the current/other manifest. Where the file does not exist,
520 520 the nodeid will be None and the flags will be the empty
521 521 string.
522 522 '''
523 523 diff = {}
524 524
525 525 for fn, n1 in self.iteritems():
526 526 fl1 = self.flags(fn)
527 527 n2 = m2.get(fn, None)
528 528 fl2 = m2.flags(fn)
529 529 if n2 is None:
530 530 fl2 = ''
531 531 if n1 != n2 or fl1 != fl2:
532 532 diff[fn] = ((n1, fl1), (n2, fl2))
533 533 elif clean:
534 534 diff[fn] = None
535 535
536 536 for fn, n2 in m2.iteritems():
537 537 if fn not in self:
538 538 fl2 = m2.flags(fn)
539 539 diff[fn] = ((None, ''), (n2, fl2))
540 540
541 541 return diff
542 542
543 543 def text(self):
544 544 """Get the full data of this manifest as a bytestring."""
545 545 fl = self.keys()
546 546 _checkforbidden(fl)
547 547
548 548 hex, flags = revlog.hex, self.flags
549 549 # if this is changed to support newlines in filenames,
550 550 # be sure to check the templates/ dir again (especially *-raw.tmpl)
551 551 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
552 552
553 553 class manifest(revlog.revlog):
554 554 def __init__(self, opener):
555 555 # During normal operations, we expect to deal with no more than four
556 556 # revs at a time (such as during commit --amend). When rebasing large
557 557 # stacks of commits, the number can go up, hence the config knob below.
558 558 cachesize = 4
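# Experimental switch: when the opener options carry 'usetreemanifest'
# (presumably wired up from a repository config knob), manifests are
# built as treemanifest objects instead of flat manifestdicts.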
559 usetreemanifest = False
559 560 opts = getattr(opener, 'options', None)
560 561 if opts is not None:
561 562 cachesize = opts.get('manifestcachesize', cachesize)
563 usetreemanifest = opts.get('usetreemanifest', usetreemanifest)
562 564 self._mancache = util.lrucachedict(cachesize)
563 565 revlog.revlog.__init__(self, opener, "00manifest.i")
566 self._usetreemanifest = usetreemanifest
567
568 def _newmanifest(self, data=''):
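# Factory honoring the treemanifest switch; read() and readdelta()
# below construct manifests through it so both types share one path.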
569 if self._usetreemanifest:
570 return treemanifest(data)
571 return manifestdict(data)
564 572
565 573 def readdelta(self, node):
566 574 r = self.rev(node)
567 575 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
568 return manifestdict(d)
576 return self._newmanifest(d)
569 577
570 578 def readfast(self, node):
571 579 '''use the faster of readdelta or read'''
572 580 r = self.rev(node)
573 581 deltaparent = self.deltaparent(r)
574 582 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
575 583 return self.readdelta(node)
576 584 return self.read(node)
577 585
578 586 def read(self, node):
579 587 if node == revlog.nullid:
580 return manifestdict() # don't upset local cache
588 return self._newmanifest() # don't upset local cache
581 589 if node in self._mancache:
582 590 return self._mancache[node][0]
583 591 text = self.revision(node)
584 592 arraytext = array.array('c', text)
585 m = manifestdict(text)
593 m = self._newmanifest(text)
586 594 self._mancache[node] = (m, arraytext)
587 595 return m
588 596
589 597 def find(self, node, f):
590 598 '''look up entry for a single file efficiently.
591 599 return (node, flags) pair if found, (None, None) if not.'''
592 600 m = self.read(node)
593 601 try:
594 602 return m.find(f)
595 603 except KeyError:
596 604 return None, None
597 605
598 606 def add(self, m, transaction, link, p1, p2, added, removed):
599 if p1 in self._mancache:
607 if p1 in self._mancache and not self._usetreemanifest:
600 608 # If our first parent is in the manifest cache, we can
601 609 # compute a delta here using properties we know about the
602 610 # manifest up-front, which may save time later for the
603 611 # revlog layer.
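# (treemanifest does not implement fastdelta(), hence the extra
# _usetreemanifest check above.)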
604 612
605 613 _checkforbidden(added)
606 614 # combine the changed lists into one list for sorting
607 615 work = [(x, False) for x in added]
608 616 work.extend((x, True) for x in removed)
609 617 # this could use heapq.merge() (from Python 2.6+) or equivalent
610 618 # since the lists are already sorted
611 619 work.sort()
612 620
613 621 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
614 622 cachedelta = self.rev(p1), deltatext
615 623 text = util.buffer(arraytext)
616 624 else:
617 625 # The first parent manifest isn't already loaded, so we'll
618 626 # just encode a fulltext of the manifest and pass that
619 627 # through to the revlog layer, and let it handle the delta
620 628 # process.
621 629 text = m.text()
622 630 arraytext = array.array('c', text)
623 631 cachedelta = None
624 632
625 633 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
626 634 self._mancache[n] = (m, arraytext)
627 635
628 636 return n