treemanifest: cache directory logs and manifests...
Martin von Zweigbergk
r25185:bf6b476f default
@@ -1,1973 +1,1973 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
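# A minimal usage sketch of the decorators above ('somerepo', '_example' and
# 'somefile' are hypothetical names; concrete uses appear throughout the
# localrepository class below):
#
#     class somerepo(localrepository):
#         @repofilecache('somefile')   # invalidated when .hg/somefile changes
#         def _example(self):
#             return parsesomefile(self)
#
#         @unfilteredmethod            # always runs against repo.unfiltered()
#         def rebuildcaches(self):
#             ...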
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception, exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced, exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in modules of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 if self.ui.configbool('format', 'generaldelta', False):
262 262 self.requirements.add("generaldelta")
263 263 if self.ui.configbool('experimental', 'treemanifest', False):
264 264 self.requirements.add("treemanifest")
265 265 if self.ui.configbool('experimental', 'manifestv2', False):
266 266 self.requirements.add("manifestv2")
267 267 else:
268 268 raise error.RepoError(_("repository %s not found") % path)
269 269 elif create:
270 270 raise error.RepoError(_("repository %s already exists") % path)
271 271 else:
272 272 try:
273 273 self.requirements = scmutil.readrequires(
274 274 self.vfs, self.supported)
275 275 except IOError, inst:
276 276 if inst.errno != errno.ENOENT:
277 277 raise
278 278
279 279 self.sharedpath = self.path
280 280 try:
281 281 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
282 282 realpath=True)
283 283 s = vfs.base
284 284 if not vfs.exists():
285 285 raise error.RepoError(
286 286 _('.hg/sharedpath points to nonexistent directory %s') % s)
287 287 self.sharedpath = s
288 288 except IOError, inst:
289 289 if inst.errno != errno.ENOENT:
290 290 raise
291 291
292 292 self.store = store.store(
293 293 self.requirements, self.sharedpath, scmutil.vfs)
294 294 self.spath = self.store.path
295 295 self.svfs = self.store.vfs
296 296 self.sopener = self.svfs
297 297 self.sjoin = self.store.join
298 298 self.vfs.createmode = self.store.createmode
299 299 self._applyopenerreqs()
300 300 if create:
301 301 self._writerequirements()
302 302
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes,
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # hold sets of revision to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
346 346 if chunkcachesize is not None:
347 347 self.svfs.options['chunkcachesize'] = chunkcachesize
348 348 maxchainlen = self.ui.configint('format', 'maxchainlen')
349 349 if maxchainlen is not None:
350 350 self.svfs.options['maxchainlen'] = maxchainlen
351 351 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
352 352 if manifestcachesize is not None:
353 353 self.svfs.options['manifestcachesize'] = manifestcachesize
354 354
355 355 def _writerequirements(self):
356 356 scmutil.writerequires(self.vfs, self.requirements)
357 357
358 358 def _checknested(self, path):
359 359 """Determine if path is a legal nested repository."""
360 360 if not path.startswith(self.root):
361 361 return False
362 362 subpath = path[len(self.root) + 1:]
363 363 normsubpath = util.pconvert(subpath)
364 364
365 365 # XXX: Checking against the current working copy is wrong in
366 366 # the sense that it can reject things like
367 367 #
368 368 # $ hg cat -r 10 sub/x.txt
369 369 #
370 370 # if sub/ is no longer a subrepository in the working copy
371 371 # parent revision.
372 372 #
373 373 # However, it can of course also allow things that would have
374 374 # been rejected before, such as the above cat command if sub/
375 375 # is a subrepository now, but was a normal directory before.
376 376 # The old path auditor would have rejected by mistake since it
377 377 # panics when it sees sub/.hg/.
378 378 #
379 379 # All in all, checking against the working copy seems sensible
380 380 # since we want to prevent access to nested repositories on
381 381 # the filesystem *now*.
382 382 ctx = self[None]
383 383 parts = util.splitpath(subpath)
384 384 while parts:
385 385 prefix = '/'.join(parts)
386 386 if prefix in ctx.substate:
387 387 if prefix == normsubpath:
388 388 return True
389 389 else:
390 390 sub = ctx.sub(prefix)
391 391 return sub.checknested(subpath[len(prefix) + 1:])
392 392 else:
393 393 parts.pop()
394 394 return False
395 395
396 396 def peer(self):
397 397 return localpeer(self) # not cached to avoid reference cycle
398 398
399 399 def unfiltered(self):
400 400 """Return unfiltered version of the repository
401 401
402 402 Intended to be overwritten by filtered repo."""
403 403 return self
404 404
405 405 def filtered(self, name):
406 406 """Return a filtered version of a repository"""
407 407 # build a new class with the mixin and the current class
408 408 # (possibly subclass of the repo)
409 409 class proxycls(repoview.repoview, self.unfiltered().__class__):
410 410 pass
411 411 return proxycls(self, name)
412 412
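# A minimal sketch: a filtered view shares storage with the original repo
# but hides filtered revisions ('served' and 'visible' are filter names
# already used elsewhere in this file):
#
#     served = repo.filtered('served')   # view suitable for serving peers
#     unfi = repo.unfiltered()           # raw, unfiltered repository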
413 413 @repofilecache('bookmarks')
414 414 def _bookmarks(self):
415 415 return bookmarks.bmstore(self)
416 416
417 417 @repofilecache('bookmarks.current')
418 418 def _activebookmark(self):
419 419 return bookmarks.readactive(self)
420 420
421 421 def bookmarkheads(self, bookmark):
422 422 name = bookmark.split('@', 1)[0]
423 423 heads = []
424 424 for mark, n in self._bookmarks.iteritems():
425 425 if mark.split('@', 1)[0] == name:
426 426 heads.append(n)
427 427 return heads
428 428
429 429 @storecache('phaseroots')
430 430 def _phasecache(self):
431 431 return phases.phasecache(self, self._phasedefaults)
432 432
433 433 @storecache('obsstore')
434 434 def obsstore(self):
435 435 # read default format for new obsstore.
436 436 defaultformat = self.ui.configint('format', 'obsstore-version', None)
437 437 # rely on obsstore class default when possible.
438 438 kwargs = {}
439 439 if defaultformat is not None:
440 440 kwargs['defaultformat'] = defaultformat
441 441 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
442 442 store = obsolete.obsstore(self.svfs, readonly=readonly,
443 443 **kwargs)
444 444 if store and readonly:
445 445 self.ui.warn(
446 446 _('obsolete feature not enabled but %i markers found!\n')
447 447 % len(list(store)))
448 448 return store
449 449
450 450 @storecache('00changelog.i')
451 451 def changelog(self):
452 452 c = changelog.changelog(self.svfs)
453 453 if 'HG_PENDING' in os.environ:
454 454 p = os.environ['HG_PENDING']
455 455 if p.startswith(self.root):
456 456 c.readpending('00changelog.i.a')
457 457 return c
458 458
459 459 @storecache('00manifest.i')
460 460 def manifest(self):
461 461 return manifest.manifest(self.svfs)
462 462
463 463 def dirlog(self, dir):
464 return manifest.manifest(self.svfs, dir)
464 return self.manifest.dirlog(dir)
465 465
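# With the change above, dirlog() delegates to the cached manifest, so
# repeated lookups of the same directory are expected to reuse the
# manifest's directory-log cache (the caching named in the commit message)
# instead of constructing a fresh manifest per call, e.g. (illustrative):
#
#     log = repo.dirlog('a/')   # served via self.manifest.dirlog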
466 466 @repofilecache('dirstate')
467 467 def dirstate(self):
468 468 warned = [0]
469 469 def validate(node):
470 470 try:
471 471 self.changelog.rev(node)
472 472 return node
473 473 except error.LookupError:
474 474 if not warned[0]:
475 475 warned[0] = True
476 476 self.ui.warn(_("warning: ignoring unknown"
477 477 " working parent %s!\n") % short(node))
478 478 return nullid
479 479
480 480 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
481 481
482 482 def __getitem__(self, changeid):
483 483 if changeid is None:
484 484 return context.workingctx(self)
485 485 if isinstance(changeid, slice):
486 486 return [context.changectx(self, i)
487 487 for i in xrange(*changeid.indices(len(self)))
488 488 if i not in self.changelog.filteredrevs]
489 489 return context.changectx(self, changeid)
490 490
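# A minimal usage sketch of the indexing protocol above:
#
#     repo[None]     # working directory context
#     repo['tip']    # changectx for a revision, node or symbolic name
#     repo[0:5]      # list of changectxs, skipping filtered revisions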
491 491 def __contains__(self, changeid):
492 492 try:
493 493 self[changeid]
494 494 return True
495 495 except error.RepoLookupError:
496 496 return False
497 497
498 498 def __nonzero__(self):
499 499 return True
500 500
501 501 def __len__(self):
502 502 return len(self.changelog)
503 503
504 504 def __iter__(self):
505 505 return iter(self.changelog)
506 506
507 507 def revs(self, expr, *args):
508 508 '''Return a list of revisions matching the given revset'''
509 509 expr = revset.formatspec(expr, *args)
510 510 m = revset.match(None, expr)
511 511 return m(self)
512 512
513 513 def set(self, expr, *args):
514 514 '''
515 515 Yield a context for each matching revision, after doing arg
516 516 replacement via revset.formatspec
517 517 '''
518 518 for r in self.revs(expr, *args):
519 519 yield self[r]
520 520
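# A minimal sketch of the revset helpers above; arguments are interpolated
# via revset.formatspec ('default' is an illustrative branch name):
#
#     for ctx in repo.set('heads(branch(%s))', 'default'):
#         repo.ui.status('%s\n' % ctx.hex())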
521 521 def url(self):
522 522 return 'file:' + self.root
523 523
524 524 def hook(self, name, throw=False, **args):
525 525 """Call a hook, passing this repo instance.
526 526
527 527 This is a convenience method to aid invoking hooks. Extensions likely
528 528 won't call this unless they have registered a custom hook or are
529 529 replacing code that is expected to call a hook.
530 530 """
531 531 return hook.hook(self.ui, self, name, throw, **args)
532 532
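# A minimal sketch: hooks invoked through this method are configured in
# hgrc and receive their arguments as $HG_* environment variables (the
# shell command below is illustrative):
#
#     [hooks]
#     pretag = echo "about to tag $HG_NODE as $HG_TAG"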
533 533 @unfilteredmethod
534 534 def _tag(self, names, node, message, local, user, date, extra={},
535 535 editor=False):
536 536 if isinstance(names, str):
537 537 names = (names,)
538 538
539 539 branches = self.branchmap()
540 540 for name in names:
541 541 self.hook('pretag', throw=True, node=hex(node), tag=name,
542 542 local=local)
543 543 if name in branches:
544 544 self.ui.warn(_("warning: tag %s conflicts with existing"
545 545 " branch name\n") % name)
546 546
547 547 def writetags(fp, names, munge, prevtags):
548 548 fp.seek(0, 2)
549 549 if prevtags and prevtags[-1] != '\n':
550 550 fp.write('\n')
551 551 for name in names:
552 552 if munge:
553 553 m = munge(name)
554 554 else:
555 555 m = name
556 556
557 557 if (self._tagscache.tagtypes and
558 558 name in self._tagscache.tagtypes):
559 559 old = self.tags().get(name, nullid)
560 560 fp.write('%s %s\n' % (hex(old), m))
561 561 fp.write('%s %s\n' % (hex(node), m))
562 562 fp.close()
563 563
564 564 prevtags = ''
565 565 if local:
566 566 try:
567 567 fp = self.vfs('localtags', 'r+')
568 568 except IOError:
569 569 fp = self.vfs('localtags', 'a')
570 570 else:
571 571 prevtags = fp.read()
572 572
573 573 # local tags are stored in the current charset
574 574 writetags(fp, names, None, prevtags)
575 575 for name in names:
576 576 self.hook('tag', node=hex(node), tag=name, local=local)
577 577 return
578 578
579 579 try:
580 580 fp = self.wfile('.hgtags', 'rb+')
581 581 except IOError, e:
582 582 if e.errno != errno.ENOENT:
583 583 raise
584 584 fp = self.wfile('.hgtags', 'ab')
585 585 else:
586 586 prevtags = fp.read()
587 587
588 588 # committed tags are stored in UTF-8
589 589 writetags(fp, names, encoding.fromlocal, prevtags)
590 590
591 591 fp.close()
592 592
593 593 self.invalidatecaches()
594 594
595 595 if '.hgtags' not in self.dirstate:
596 596 self[None].add(['.hgtags'])
597 597
598 598 m = matchmod.exact(self.root, '', ['.hgtags'])
599 599 tagnode = self.commit(message, user, date, extra=extra, match=m,
600 600 editor=editor)
601 601
602 602 for name in names:
603 603 self.hook('tag', node=hex(node), tag=name, local=local)
604 604
605 605 return tagnode
606 606
607 607 def tag(self, names, node, message, local, user, date, editor=False):
608 608 '''tag a revision with one or more symbolic names.
609 609
610 610 names is a list of strings or, when adding a single tag, names may be a
611 611 string.
612 612
613 613 if local is True, the tags are stored in a per-repository file.
614 614 otherwise, they are stored in the .hgtags file, and a new
615 615 changeset is committed with the change.
616 616
617 617 keyword arguments:
618 618
619 619 local: whether to store tags in non-version-controlled file
620 620 (default False)
621 621
622 622 message: commit message to use if committing
623 623
624 624 user: name of user to use if committing
625 625
626 626 date: date tuple to use if committing'''
627 627
628 628 if not local:
629 629 m = matchmod.exact(self.root, '', ['.hgtags'])
630 630 if any(self.status(match=m, unknown=True, ignored=True)):
631 631 raise util.Abort(_('working copy of .hgtags is changed'),
632 632 hint=_('please commit .hgtags manually'))
633 633
634 634 self.tags() # instantiate the cache
635 635 self._tag(names, node, message, local, user, date, editor=editor)
636 636
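# A minimal usage sketch of the tagging API above (the tag name, message
# and user are illustrative):
#
#     repo.tag(['v1.0'], repo['.'].node(), 'Added tag v1.0', False,
#              'alice', None)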
637 637 @filteredpropertycache
638 638 def _tagscache(self):
639 639 '''Returns a tagscache object that contains various tags related
640 640 caches.'''
641 641
642 642 # This simplifies its cache management by having one decorated
643 643 # function (this one) and the rest simply fetch things from it.
644 644 class tagscache(object):
645 645 def __init__(self):
646 646 # These two define the set of tags for this repository. tags
647 647 # maps tag name to node; tagtypes maps tag name to 'global' or
648 648 # 'local'. (Global tags are defined by .hgtags across all
649 649 # heads, and local tags are defined in .hg/localtags.)
650 650 # They constitute the in-memory cache of tags.
651 651 self.tags = self.tagtypes = None
652 652
653 653 self.nodetagscache = self.tagslist = None
654 654
655 655 cache = tagscache()
656 656 cache.tags, cache.tagtypes = self._findtags()
657 657
658 658 return cache
659 659
660 660 def tags(self):
661 661 '''return a mapping of tag to node'''
662 662 t = {}
663 663 if self.changelog.filteredrevs:
664 664 tags, tt = self._findtags()
665 665 else:
666 666 tags = self._tagscache.tags
667 667 for k, v in tags.iteritems():
668 668 try:
669 669 # ignore tags to unknown nodes
670 670 self.changelog.rev(v)
671 671 t[k] = v
672 672 except (error.LookupError, ValueError):
673 673 pass
674 674 return t
675 675
676 676 def _findtags(self):
677 677 '''Do the hard work of finding tags. Return a pair of dicts
678 678 (tags, tagtypes) where tags maps tag name to node, and tagtypes
679 679 maps tag name to a string like \'global\' or \'local\'.
680 680 Subclasses or extensions are free to add their own tags, but
681 681 should be aware that the returned dicts will be retained for the
682 682 duration of the localrepo object.'''
683 683
684 684 # XXX what tagtype should subclasses/extensions use? Currently
685 685 # mq and bookmarks add tags, but do not set the tagtype at all.
686 686 # Should each extension invent its own tag type? Should there
687 687 # be one tagtype for all such "virtual" tags? Or is the status
688 688 # quo fine?
689 689
690 690 alltags = {} # map tag name to (node, hist)
691 691 tagtypes = {}
692 692
693 693 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
694 694 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
695 695
696 696 # Build the return dicts. Have to re-encode tag names because
697 697 # the tags module always uses UTF-8 (in order not to lose info
698 698 # writing to the cache), but the rest of Mercurial wants them in
699 699 # local encoding.
700 700 tags = {}
701 701 for (name, (node, hist)) in alltags.iteritems():
702 702 if node != nullid:
703 703 tags[encoding.tolocal(name)] = node
704 704 tags['tip'] = self.changelog.tip()
705 705 tagtypes = dict([(encoding.tolocal(name), value)
706 706 for (name, value) in tagtypes.iteritems()])
707 707 return (tags, tagtypes)
708 708
709 709 def tagtype(self, tagname):
710 710 '''
711 711 return the type of the given tag. result can be:
712 712
713 713 'local' : a local tag
714 714 'global' : a global tag
715 715 None : tag does not exist
716 716 '''
717 717
718 718 return self._tagscache.tagtypes.get(tagname)
719 719
720 720 def tagslist(self):
721 721 '''return a list of tags ordered by revision'''
722 722 if not self._tagscache.tagslist:
723 723 l = []
724 724 for t, n in self.tags().iteritems():
725 725 l.append((self.changelog.rev(n), t, n))
726 726 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
727 727
728 728 return self._tagscache.tagslist
729 729
730 730 def nodetags(self, node):
731 731 '''return the tags associated with a node'''
732 732 if not self._tagscache.nodetagscache:
733 733 nodetagscache = {}
734 734 for t, n in self._tagscache.tags.iteritems():
735 735 nodetagscache.setdefault(n, []).append(t)
736 736 for tags in nodetagscache.itervalues():
737 737 tags.sort()
738 738 self._tagscache.nodetagscache = nodetagscache
739 739 return self._tagscache.nodetagscache.get(node, [])
740 740
741 741 def nodebookmarks(self, node):
742 742 marks = []
743 743 for bookmark, n in self._bookmarks.iteritems():
744 744 if n == node:
745 745 marks.append(bookmark)
746 746 return sorted(marks)
747 747
748 748 def branchmap(self):
749 749 '''returns a dictionary {branch: [branchheads]} with branchheads
750 750 ordered by increasing revision number'''
751 751 branchmap.updatecache(self)
752 752 return self._branchcaches[self.filtername]
753 753
754 754 @unfilteredmethod
755 755 def revbranchcache(self):
756 756 if not self._revbranchcache:
757 757 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
758 758 return self._revbranchcache
759 759
760 760 def branchtip(self, branch, ignoremissing=False):
761 761 '''return the tip node for a given branch
762 762
763 763 If ignoremissing is True, then this method will not raise an error.
764 764 This is helpful for callers that only expect None for a missing branch
765 765 (e.g. namespace).
766 766
767 767 '''
768 768 try:
769 769 return self.branchmap().branchtip(branch)
770 770 except KeyError:
771 771 if not ignoremissing:
772 772 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
773 773 else:
774 774 pass
775 775
776 776 def lookup(self, key):
777 777 return self[key].node()
778 778
779 779 def lookupbranch(self, key, remote=None):
780 780 repo = remote or self
781 781 if key in repo.branchmap():
782 782 return key
783 783
784 784 repo = (remote and remote.local()) and remote or self
785 785 return repo[key].branch()
786 786
787 787 def known(self, nodes):
788 788 nm = self.changelog.nodemap
789 789 pc = self._phasecache
790 790 result = []
791 791 for n in nodes:
792 792 r = nm.get(n)
793 793 resp = not (r is None or pc.phase(self, r) >= phases.secret)
794 794 result.append(resp)
795 795 return result
796 796
797 797 def local(self):
798 798 return self
799 799
800 800 def cancopy(self):
801 801 # so statichttprepo's override of local() works
802 802 if not self.local():
803 803 return False
804 804 if not self.ui.configbool('phases', 'publish', True):
805 805 return True
806 806 # if publishing we can't copy if there is filtered content
807 807 return not self.filtered('visible').changelog.filteredrevs
808 808
809 809 def shared(self):
810 810 '''the type of shared repository (None if not shared)'''
811 811 if self.sharedpath != self.path:
812 812 return 'store'
813 813 return None
814 814
815 815 def join(self, f, *insidef):
816 816 return self.vfs.join(os.path.join(f, *insidef))
817 817
818 818 def wjoin(self, f, *insidef):
819 819 return self.vfs.reljoin(self.root, f, *insidef)
820 820
821 821 def file(self, f):
822 822 if f[0] == '/':
823 823 f = f[1:]
824 824 return filelog.filelog(self.svfs, f)
825 825
826 826 def changectx(self, changeid):
827 827 return self[changeid]
828 828
829 829 def parents(self, changeid=None):
830 830 '''get list of changectxs for parents of changeid'''
831 831 return self[changeid].parents()
832 832
833 833 def setparents(self, p1, p2=nullid):
834 834 self.dirstate.beginparentchange()
835 835 copies = self.dirstate.setparents(p1, p2)
836 836 pctx = self[p1]
837 837 if copies:
838 838 # Adjust copy records, the dirstate cannot do it, it
839 839 # requires access to parents manifests. Preserve them
840 840 # only for entries added to first parent.
841 841 for f in copies:
842 842 if f not in pctx and copies[f] in pctx:
843 843 self.dirstate.copy(copies[f], f)
844 844 if p2 == nullid:
845 845 for f, s in sorted(self.dirstate.copies().items()):
846 846 if f not in pctx and s not in pctx:
847 847 self.dirstate.copy(None, f)
848 848 self.dirstate.endparentchange()
849 849
850 850 def filectx(self, path, changeid=None, fileid=None):
851 851 """changeid can be a changeset revision, node, or tag.
852 852 fileid can be a file revision or node."""
853 853 return context.filectx(self, path, changeid, fileid)
854 854
855 855 def getcwd(self):
856 856 return self.dirstate.getcwd()
857 857
858 858 def pathto(self, f, cwd=None):
859 859 return self.dirstate.pathto(f, cwd)
860 860
861 861 def wfile(self, f, mode='r'):
862 862 return self.wvfs(f, mode)
863 863
864 864 def _link(self, f):
865 865 return self.wvfs.islink(f)
866 866
867 867 def _loadfilter(self, filter):
868 868 if filter not in self.filterpats:
869 869 l = []
870 870 for pat, cmd in self.ui.configitems(filter):
871 871 if cmd == '!':
872 872 continue
873 873 mf = matchmod.match(self.root, '', [pat])
874 874 fn = None
875 875 params = cmd
876 876 for name, filterfn in self._datafilters.iteritems():
877 877 if cmd.startswith(name):
878 878 fn = filterfn
879 879 params = cmd[len(name):].lstrip()
880 880 break
881 881 if not fn:
882 882 fn = lambda s, c, **kwargs: util.filter(s, c)
883 883 # Wrap old filters not supporting keyword arguments
884 884 if not inspect.getargspec(fn)[2]:
885 885 oldfn = fn
886 886 fn = lambda s, c, **kwargs: oldfn(s, c)
887 887 l.append((mf, fn, params))
888 888 self.filterpats[filter] = l
889 889 return self.filterpats[filter]
890 890
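# A minimal hgrc sketch for the filter sections read above ([encode]
# filters apply on wread, [decode] filters on wwrite; the pattern and
# command here are illustrative, and '!' disables a pattern, as handled
# above):
#
#     [decode]
#     *.txt = unix2dos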
891 891 def _filter(self, filterpats, filename, data):
892 892 for mf, fn, cmd in filterpats:
893 893 if mf(filename):
894 894 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
895 895 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
896 896 break
897 897
898 898 return data
899 899
900 900 @unfilteredpropertycache
901 901 def _encodefilterpats(self):
902 902 return self._loadfilter('encode')
903 903
904 904 @unfilteredpropertycache
905 905 def _decodefilterpats(self):
906 906 return self._loadfilter('decode')
907 907
908 908 def adddatafilter(self, name, filter):
909 909 self._datafilters[name] = filter
910 910
911 911 def wread(self, filename):
912 912 if self._link(filename):
913 913 data = self.wvfs.readlink(filename)
914 914 else:
915 915 data = self.wvfs.read(filename)
916 916 return self._filter(self._encodefilterpats, filename, data)
917 917
918 918 def wwrite(self, filename, data, flags):
919 919 """write ``data`` into ``filename`` in the working directory
920 920
921 921 This returns the length of the written (possibly decoded) data.
922 922 """
923 923 data = self._filter(self._decodefilterpats, filename, data)
924 924 if 'l' in flags:
925 925 self.wvfs.symlink(data, filename)
926 926 else:
927 927 self.wvfs.write(filename, data)
928 928 if 'x' in flags:
929 929 self.wvfs.setflags(filename, False, True)
930 930 return len(data)
931 931
932 932 def wwritedata(self, filename, data):
933 933 return self._filter(self._decodefilterpats, filename, data)
934 934
935 935 def currenttransaction(self):
936 936 """return the current transaction or None if non exists"""
937 937 if self._transref:
938 938 tr = self._transref()
939 939 else:
940 940 tr = None
941 941
942 942 if tr and tr.running():
943 943 return tr
944 944 return None
945 945
946 946 def transaction(self, desc, report=None):
947 947 if (self.ui.configbool('devel', 'all')
948 948 or self.ui.configbool('devel', 'check-locks')):
949 949 l = self._lockref and self._lockref()
950 950 if l is None or not l.held:
951 951 scmutil.develwarn(self.ui, 'transaction with no lock')
952 952 tr = self.currenttransaction()
953 953 if tr is not None:
954 954 return tr.nest()
955 955
956 956 # abort here if the journal already exists
957 957 if self.svfs.exists("journal"):
958 958 raise error.RepoError(
959 959 _("abandoned transaction found"),
960 960 hint=_("run 'hg recover' to clean up transaction"))
961 961
962 962 self.hook('pretxnopen', throw=True, txnname=desc)
963 963
964 964 self._writejournal(desc)
965 965 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
966 966 if report:
967 967 rp = report
968 968 else:
969 969 rp = self.ui.warn
970 970 vfsmap = {'plain': self.vfs} # root of .hg/
971 971 # we must avoid cyclic reference between repo and transaction.
972 972 reporef = weakref.ref(self)
973 973 def validate(tr):
974 974 """will run pre-closing hooks"""
975 975 pending = lambda: tr.writepending() and self.root or ""
976 976 reporef().hook('pretxnclose', throw=True, pending=pending,
977 977 txnname=desc, **tr.hookargs)
978 978
979 979 tr = transaction.transaction(rp, self.sopener, vfsmap,
980 980 "journal",
981 981 "undo",
982 982 aftertrans(renames),
983 983 self.store.createmode,
984 984 validator=validate)
985 985
986 986 trid = 'TXN:' + util.sha1("%s#%f" % (id(tr), time.time())).hexdigest()
987 987 tr.hookargs['TXNID'] = trid
988 988 # note: writing the fncache only during finalize means that the file is
989 989 # outdated when running hooks. As fncache is used for streaming clone,
990 990 # this is not expected to break anything that happens during the hooks.
991 991 tr.addfinalize('flush-fncache', self.store.write)
992 992 def txnclosehook(tr2):
993 993 """To be run if transaction is successful, will schedule a hook run
994 994 """
995 995 def hook():
996 996 reporef().hook('txnclose', throw=False, txnname=desc,
997 997 **tr2.hookargs)
998 998 reporef()._afterlock(hook)
999 999 tr.addfinalize('txnclose-hook', txnclosehook)
1000 1000 def txnaborthook(tr2):
1001 1001 """To be run if transaction is aborted
1002 1002 """
1003 1003 reporef().hook('txnabort', throw=False, txnname=desc,
1004 1004 **tr2.hookargs)
1005 1005 tr.addabort('txnabort-hook', txnaborthook)
1006 1006 self._transref = weakref.ref(tr)
1007 1007 return tr
1008 1008
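# A minimal sketch of the transaction API above (commitctx() later in
# this file uses the same pattern):
#
#     tr = repo.transaction('example')
#     try:
#         ...           # write store data through tr
#         tr.close()    # commit the transaction
#     finally:
#         tr.release()  # rolls back unless close() was reached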
1009 1009 def _journalfiles(self):
1010 1010 return ((self.svfs, 'journal'),
1011 1011 (self.vfs, 'journal.dirstate'),
1012 1012 (self.vfs, 'journal.branch'),
1013 1013 (self.vfs, 'journal.desc'),
1014 1014 (self.vfs, 'journal.bookmarks'),
1015 1015 (self.svfs, 'journal.phaseroots'))
1016 1016
1017 1017 def undofiles(self):
1018 1018 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1019 1019
1020 1020 def _writejournal(self, desc):
1021 1021 self.vfs.write("journal.dirstate",
1022 1022 self.vfs.tryread("dirstate"))
1023 1023 self.vfs.write("journal.branch",
1024 1024 encoding.fromlocal(self.dirstate.branch()))
1025 1025 self.vfs.write("journal.desc",
1026 1026 "%d\n%s\n" % (len(self), desc))
1027 1027 self.vfs.write("journal.bookmarks",
1028 1028 self.vfs.tryread("bookmarks"))
1029 1029 self.svfs.write("journal.phaseroots",
1030 1030 self.svfs.tryread("phaseroots"))
1031 1031
1032 1032 def recover(self):
1033 1033 lock = self.lock()
1034 1034 try:
1035 1035 if self.svfs.exists("journal"):
1036 1036 self.ui.status(_("rolling back interrupted transaction\n"))
1037 1037 vfsmap = {'': self.svfs,
1038 1038 'plain': self.vfs,}
1039 1039 transaction.rollback(self.svfs, vfsmap, "journal",
1040 1040 self.ui.warn)
1041 1041 self.invalidate()
1042 1042 return True
1043 1043 else:
1044 1044 self.ui.warn(_("no interrupted transaction available\n"))
1045 1045 return False
1046 1046 finally:
1047 1047 lock.release()
1048 1048
1049 1049 def rollback(self, dryrun=False, force=False):
1050 1050 wlock = lock = None
1051 1051 try:
1052 1052 wlock = self.wlock()
1053 1053 lock = self.lock()
1054 1054 if self.svfs.exists("undo"):
1055 1055 return self._rollback(dryrun, force)
1056 1056 else:
1057 1057 self.ui.warn(_("no rollback information available\n"))
1058 1058 return 1
1059 1059 finally:
1060 1060 release(lock, wlock)
1061 1061
1062 1062 @unfilteredmethod # Until we get smarter cache management
1063 1063 def _rollback(self, dryrun, force):
1064 1064 ui = self.ui
1065 1065 try:
1066 1066 args = self.vfs.read('undo.desc').splitlines()
1067 1067 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1068 1068 if len(args) >= 3:
1069 1069 detail = args[2]
1070 1070 oldtip = oldlen - 1
1071 1071
1072 1072 if detail and ui.verbose:
1073 1073 msg = (_('repository tip rolled back to revision %s'
1074 1074 ' (undo %s: %s)\n')
1075 1075 % (oldtip, desc, detail))
1076 1076 else:
1077 1077 msg = (_('repository tip rolled back to revision %s'
1078 1078 ' (undo %s)\n')
1079 1079 % (oldtip, desc))
1080 1080 except IOError:
1081 1081 msg = _('rolling back unknown transaction\n')
1082 1082 desc = None
1083 1083
1084 1084 if not force and self['.'] != self['tip'] and desc == 'commit':
1085 1085 raise util.Abort(
1086 1086 _('rollback of last commit while not checked out '
1087 1087 'may lose data'), hint=_('use -f to force'))
1088 1088
1089 1089 ui.status(msg)
1090 1090 if dryrun:
1091 1091 return 0
1092 1092
1093 1093 parents = self.dirstate.parents()
1094 1094 self.destroying()
1095 1095 vfsmap = {'plain': self.vfs, '': self.svfs}
1096 1096 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1097 1097 if self.vfs.exists('undo.bookmarks'):
1098 1098 self.vfs.rename('undo.bookmarks', 'bookmarks')
1099 1099 if self.svfs.exists('undo.phaseroots'):
1100 1100 self.svfs.rename('undo.phaseroots', 'phaseroots')
1101 1101 self.invalidate()
1102 1102
1103 1103 parentgone = (parents[0] not in self.changelog.nodemap or
1104 1104 parents[1] not in self.changelog.nodemap)
1105 1105 if parentgone:
1106 1106 self.vfs.rename('undo.dirstate', 'dirstate')
1107 1107 try:
1108 1108 branch = self.vfs.read('undo.branch')
1109 1109 self.dirstate.setbranch(encoding.tolocal(branch))
1110 1110 except IOError:
1111 1111 ui.warn(_('named branch could not be reset: '
1112 1112 'current branch is still \'%s\'\n')
1113 1113 % self.dirstate.branch())
1114 1114
1115 1115 self.dirstate.invalidate()
1116 1116 parents = tuple([p.rev() for p in self.parents()])
1117 1117 if len(parents) > 1:
1118 1118 ui.status(_('working directory now based on '
1119 1119 'revisions %d and %d\n') % parents)
1120 1120 else:
1121 1121 ui.status(_('working directory now based on '
1122 1122 'revision %d\n') % parents)
1123 1123 ms = mergemod.mergestate(self)
1124 1124 ms.reset(self['.'].node())
1125 1125
1126 1126 # TODO: if we know which new heads may result from this rollback, pass
1127 1127 # them to destroy(), which will prevent the branchhead cache from being
1128 1128 # invalidated.
1129 1129 self.destroyed()
1130 1130 return 0
1131 1131
1132 1132 def invalidatecaches(self):
1133 1133
1134 1134 if '_tagscache' in vars(self):
1135 1135 # can't use delattr on proxy
1136 1136 del self.__dict__['_tagscache']
1137 1137
1138 1138 self.unfiltered()._branchcaches.clear()
1139 1139 self.invalidatevolatilesets()
1140 1140
1141 1141 def invalidatevolatilesets(self):
1142 1142 self.filteredrevcache.clear()
1143 1143 obsolete.clearobscaches(self)
1144 1144
1145 1145 def invalidatedirstate(self):
1146 1146 '''Invalidates the dirstate, causing the next call to dirstate
1147 1147 to check if it was modified since the last time it was read,
1148 1148 rereading it if it has.
1149 1149
1150 1150 This is different from dirstate.invalidate() in that it doesn't always
1151 1151 reread the dirstate. Use dirstate.invalidate() if you want to
1152 1152 explicitly read the dirstate again (i.e. restoring it to a previous
1153 1153 known good state).'''
1154 1154 if hasunfilteredcache(self, 'dirstate'):
1155 1155 for k in self.dirstate._filecache:
1156 1156 try:
1157 1157 delattr(self.dirstate, k)
1158 1158 except AttributeError:
1159 1159 pass
1160 1160 delattr(self.unfiltered(), 'dirstate')
1161 1161
1162 1162 def invalidate(self):
1163 1163 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1164 1164 for k in self._filecache:
1165 1165 # dirstate is invalidated separately in invalidatedirstate()
1166 1166 if k == 'dirstate':
1167 1167 continue
1168 1168
1169 1169 try:
1170 1170 delattr(unfiltered, k)
1171 1171 except AttributeError:
1172 1172 pass
1173 1173 self.invalidatecaches()
1174 1174 self.store.invalidatecaches()
1175 1175
1176 1176 def invalidateall(self):
1177 1177 '''Fully invalidates both store and non-store parts, causing the
1178 1178 subsequent operation to reread any outside changes.'''
1179 1179 # extension should hook this to invalidate its caches
1180 1180 self.invalidate()
1181 1181 self.invalidatedirstate()
1182 1182
1183 1183 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1184 1184 try:
1185 1185 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1186 1186 except error.LockHeld, inst:
1187 1187 if not wait:
1188 1188 raise
1189 1189 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1190 1190 (desc, inst.locker))
1191 1191 # default to 600 seconds timeout
1192 1192 l = lockmod.lock(vfs, lockname,
1193 1193 int(self.ui.config("ui", "timeout", "600")),
1194 1194 releasefn, desc=desc)
1195 1195 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1196 1196 if acquirefn:
1197 1197 acquirefn()
1198 1198 return l
1199 1199
1200 1200 def _afterlock(self, callback):
1201 1201 """add a callback to be run when the repository is fully unlocked
1202 1202
1203 1203 The callback will be executed when the outermost lock is released
1204 1204 (with wlock being higher level than 'lock')."""
1205 1205 for ref in (self._wlockref, self._lockref):
1206 1206 l = ref and ref()
1207 1207 if l and l.held:
1208 1208 l.postrelease.append(callback)
1209 1209 break
1210 1210 else: # no lock has been found.
1211 1211 callback()
1212 1212
1213 1213 def lock(self, wait=True):
1214 1214 '''Lock the repository store (.hg/store) and return a weak reference
1215 1215 to the lock. Use this before modifying the store (e.g. committing or
1216 1216 stripping). If you are opening a transaction, get a lock as well.
1217 1217
1218 1218 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1219 1219 'wlock' first to avoid a dead-lock hazard.'''
1220 1220 l = self._lockref and self._lockref()
1221 1221 if l is not None and l.held:
1222 1222 l.lock()
1223 1223 return l
1224 1224
1225 1225 def unlock():
1226 1226 for k, ce in self._filecache.items():
1227 1227 if k == 'dirstate' or k not in self.__dict__:
1228 1228 continue
1229 1229 ce.refresh()
1230 1230
1231 1231 l = self._lock(self.svfs, "lock", wait, unlock,
1232 1232 self.invalidate, _('repository %s') % self.origroot)
1233 1233 self._lockref = weakref.ref(l)
1234 1234 return l
1235 1235
1236 1236 def wlock(self, wait=True):
1237 1237 '''Lock the non-store parts of the repository (everything under
1238 1238 .hg except .hg/store) and return a weak reference to the lock.
1239 1239
1240 1240 Use this before modifying files in .hg.
1241 1241
1242 1242 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1243 1243 'wlock' first to avoid a dead-lock hazard.'''
1244 1244 l = self._wlockref and self._wlockref()
1245 1245 if l is not None and l.held:
1246 1246 l.lock()
1247 1247 return l
1248 1248
1249 1249 # We do not need to check for non-waiting lock acquisition. Such
1250 1250 # acquisition would not cause dead-lock as they would just fail.
1251 1251 if wait and (self.ui.configbool('devel', 'all')
1252 1252 or self.ui.configbool('devel', 'check-locks')):
1253 1253 l = self._lockref and self._lockref()
1254 1254 if l is not None and l.held:
1255 1255 scmutil.develwarn(self.ui, '"wlock" acquired after "lock"')
1256 1256
1257 1257 def unlock():
1258 1258 if self.dirstate.pendingparentchange():
1259 1259 self.dirstate.invalidate()
1260 1260 else:
1261 1261 self.dirstate.write()
1262 1262
1263 1263 self._filecache['dirstate'].refresh()
1264 1264
1265 1265 l = self._lock(self.vfs, "wlock", wait, unlock,
1266 1266 self.invalidatedirstate, _('working directory of %s') %
1267 1267 self.origroot)
1268 1268 self._wlockref = weakref.ref(l)
1269 1269 return l
1270 1270
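# A minimal sketch of the lock ordering documented above ('wlock' before
# 'lock'; rollback() below uses the same pattern):
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         ...
#     finally:
#         release(lock, wlock)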
1271 1271 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1272 1272 """
1273 1273 commit an individual file as part of a larger transaction
1274 1274 """
1275 1275
1276 1276 fname = fctx.path()
1277 1277 fparent1 = manifest1.get(fname, nullid)
1278 1278 fparent2 = manifest2.get(fname, nullid)
1279 1279 if isinstance(fctx, context.filectx):
1280 1280 node = fctx.filenode()
1281 1281 if node in [fparent1, fparent2]:
1282 1282 self.ui.debug('reusing %s filelog entry\n' % fname)
1283 1283 return node
1284 1284
1285 1285 flog = self.file(fname)
1286 1286 meta = {}
1287 1287 copy = fctx.renamed()
1288 1288 if copy and copy[0] != fname:
1289 1289 # Mark the new revision of this file as a copy of another
1290 1290 # file. This copy data will effectively act as a parent
1291 1291 # of this new revision. If this is a merge, the first
1292 1292 # parent will be the nullid (meaning "look up the copy data")
1293 1293 # and the second one will be the other parent. For example:
1294 1294 #
1295 1295 # 0 --- 1 --- 3 rev1 changes file foo
1296 1296 # \ / rev2 renames foo to bar and changes it
1297 1297 # \- 2 -/ rev3 should have bar with all changes and
1298 1298 # should record that bar descends from
1299 1299 # bar in rev2 and foo in rev1
1300 1300 #
1301 1301 # this allows this merge to succeed:
1302 1302 #
1303 1303 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1304 1304 # \ / merging rev3 and rev4 should use bar@rev2
1305 1305 # \- 2 --- 4 as the merge base
1306 1306 #
1307 1307
1308 1308 cfname = copy[0]
1309 1309 crev = manifest1.get(cfname)
1310 1310 newfparent = fparent2
1311 1311
1312 1312 if manifest2: # branch merge
1313 1313 if fparent2 == nullid or crev is None: # copied on remote side
1314 1314 if cfname in manifest2:
1315 1315 crev = manifest2[cfname]
1316 1316 newfparent = fparent1
1317 1317
1318 1318 # Here, we used to search backwards through history to try to find
1319 1319 # where the file copy came from if the source of a copy was not in
1320 1320 # the parent directory. However, this doesn't actually make sense to
1321 1321 # do (what does a copy from something not in your working copy even
1322 1322 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1323 1323 # the user that copy information was dropped, so if they didn't
1324 1324 # expect this outcome it can be fixed, but this is the correct
1325 1325 # behavior in this circumstance.
1326 1326
1327 1327 if crev:
1328 1328 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1329 1329 meta["copy"] = cfname
1330 1330 meta["copyrev"] = hex(crev)
1331 1331 fparent1, fparent2 = nullid, newfparent
1332 1332 else:
1333 1333 self.ui.warn(_("warning: can't find ancestor for '%s' "
1334 1334 "copied from '%s'!\n") % (fname, cfname))
1335 1335
1336 1336 elif fparent1 == nullid:
1337 1337 fparent1, fparent2 = fparent2, nullid
1338 1338 elif fparent2 != nullid:
1339 1339 # is one parent an ancestor of the other?
1340 1340 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1341 1341 if fparent1 in fparentancestors:
1342 1342 fparent1, fparent2 = fparent2, nullid
1343 1343 elif fparent2 in fparentancestors:
1344 1344 fparent2 = nullid
1345 1345
1346 1346 # is the file changed?
1347 1347 text = fctx.data()
1348 1348 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1349 1349 changelist.append(fname)
1350 1350 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1351 1351 # are just the flags changed during merge?
1352 1352 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1353 1353 changelist.append(fname)
1354 1354
1355 1355 return fparent1
1356 1356
1357 1357 @unfilteredmethod
1358 1358 def commit(self, text="", user=None, date=None, match=None, force=False,
1359 1359 editor=False, extra={}):
1360 1360 """Add a new revision to current repository.
1361 1361
1362 1362 Revision information is gathered from the working directory,
1363 1363 match can be used to filter the committed files. If editor is
1364 1364 supplied, it is called to get a commit message.
1365 1365 """
1366 1366
1367 1367 def fail(f, msg):
1368 1368 raise util.Abort('%s: %s' % (f, msg))
1369 1369
1370 1370 if not match:
1371 1371 match = matchmod.always(self.root, '')
1372 1372
1373 1373 if not force:
1374 1374 vdirs = []
1375 1375 match.explicitdir = vdirs.append
1376 1376 match.bad = fail
1377 1377
1378 1378 wlock = self.wlock()
1379 1379 try:
1380 1380 wctx = self[None]
1381 1381 merge = len(wctx.parents()) > 1
1382 1382
1383 1383 if not force and merge and match.ispartial():
1384 1384 raise util.Abort(_('cannot partially commit a merge '
1385 1385 '(do not specify files or patterns)'))
1386 1386
1387 1387 status = self.status(match=match, clean=force)
1388 1388 if force:
1389 1389 status.modified.extend(status.clean) # mq may commit clean files
1390 1390
1391 1391 # check subrepos
1392 1392 subs = []
1393 1393 commitsubs = set()
1394 1394 newstate = wctx.substate.copy()
1395 1395 # only manage subrepos and .hgsubstate if .hgsub is present
1396 1396 if '.hgsub' in wctx:
1397 1397 # we'll decide whether to track this ourselves, thanks
1398 1398 for c in status.modified, status.added, status.removed:
1399 1399 if '.hgsubstate' in c:
1400 1400 c.remove('.hgsubstate')
1401 1401
1402 1402 # compare current state to last committed state
1403 1403 # build new substate based on last committed state
1404 1404 oldstate = wctx.p1().substate
1405 1405 for s in sorted(newstate.keys()):
1406 1406 if not match(s):
1407 1407 # ignore working copy, use old state if present
1408 1408 if s in oldstate:
1409 1409 newstate[s] = oldstate[s]
1410 1410 continue
1411 1411 if not force:
1412 1412 raise util.Abort(
1413 1413 _("commit with new subrepo %s excluded") % s)
1414 1414 dirtyreason = wctx.sub(s).dirtyreason(True)
1415 1415 if dirtyreason:
1416 1416 if not self.ui.configbool('ui', 'commitsubrepos'):
1417 1417 raise util.Abort(dirtyreason,
1418 1418 hint=_("use --subrepos for recursive commit"))
1419 1419 subs.append(s)
1420 1420 commitsubs.add(s)
1421 1421 else:
1422 1422 bs = wctx.sub(s).basestate()
1423 1423 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1424 1424 if oldstate.get(s, (None, None, None))[1] != bs:
1425 1425 subs.append(s)
1426 1426
1427 1427 # check for removed subrepos
1428 1428 for p in wctx.parents():
1429 1429 r = [s for s in p.substate if s not in newstate]
1430 1430 subs += [s for s in r if match(s)]
1431 1431 if subs:
1432 1432 if (not match('.hgsub') and
1433 1433 '.hgsub' in (wctx.modified() + wctx.added())):
1434 1434 raise util.Abort(
1435 1435 _("can't commit subrepos without .hgsub"))
1436 1436 status.modified.insert(0, '.hgsubstate')
1437 1437
1438 1438 elif '.hgsub' in status.removed:
1439 1439 # clean up .hgsubstate when .hgsub is removed
1440 1440 if ('.hgsubstate' in wctx and
1441 1441 '.hgsubstate' not in (status.modified + status.added +
1442 1442 status.removed)):
1443 1443 status.removed.insert(0, '.hgsubstate')
1444 1444
1445 1445 # make sure all explicit patterns are matched
1446 1446 if not force and match.files():
1447 1447 matched = set(status.modified + status.added + status.removed)
1448 1448
1449 1449 for f in match.files():
1450 1450 f = self.dirstate.normalize(f)
1451 1451 if f == '.' or f in matched or f in wctx.substate:
1452 1452 continue
1453 1453 if f in status.deleted:
1454 1454 fail(f, _('file not found!'))
1455 1455 if f in vdirs: # visited directory
1456 1456 d = f + '/'
1457 1457 for mf in matched:
1458 1458 if mf.startswith(d):
1459 1459 break
1460 1460 else:
1461 1461 fail(f, _("no match under directory!"))
1462 1462 elif f not in self.dirstate:
1463 1463 fail(f, _("file not tracked!"))
1464 1464
1465 1465 cctx = context.workingcommitctx(self, status,
1466 1466 text, user, date, extra)
1467 1467
1468 1468 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1469 1469 or extra.get('close') or merge or cctx.files()
1470 1470 or self.ui.configbool('ui', 'allowemptycommit'))
1471 1471 if not allowemptycommit:
1472 1472 return None
1473 1473
1474 1474 if merge and cctx.deleted():
1475 1475 raise util.Abort(_("cannot commit merge with missing files"))
1476 1476
1477 1477 ms = mergemod.mergestate(self)
1478 1478 for f in status.modified:
1479 1479 if f in ms and ms[f] == 'u':
1480 1480 raise util.Abort(_('unresolved merge conflicts '
1481 1481 '(see "hg help resolve")'))
1482 1482
1483 1483 if editor:
1484 1484 cctx._text = editor(self, cctx, subs)
1485 1485 edited = (text != cctx._text)
1486 1486
1487 1487 # Save commit message in case this transaction gets rolled back
1488 1488 # (e.g. by a pretxncommit hook). Leave the content alone on
1489 1489 # the assumption that the user will use the same editor again.
1490 1490 msgfn = self.savecommitmessage(cctx._text)
1491 1491
1492 1492 # commit subs and write new state
1493 1493 if subs:
1494 1494 for s in sorted(commitsubs):
1495 1495 sub = wctx.sub(s)
1496 1496 self.ui.status(_('committing subrepository %s\n') %
1497 1497 subrepo.subrelpath(sub))
1498 1498 sr = sub.commit(cctx._text, user, date)
1499 1499 newstate[s] = (newstate[s][0], sr)
1500 1500 subrepo.writestate(self, newstate)
1501 1501
1502 1502 p1, p2 = self.dirstate.parents()
1503 1503 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1504 1504 try:
1505 1505 self.hook("precommit", throw=True, parent1=hookp1,
1506 1506 parent2=hookp2)
1507 1507 ret = self.commitctx(cctx, True)
1508 1508 except: # re-raises
1509 1509 if edited:
1510 1510 self.ui.write(
1511 1511 _('note: commit message saved in %s\n') % msgfn)
1512 1512 raise
1513 1513
1514 1514 # update bookmarks, dirstate and mergestate
1515 1515 bookmarks.update(self, [p1, p2], ret)
1516 1516 cctx.markcommitted(ret)
1517 1517 ms.reset()
1518 1518 finally:
1519 1519 wlock.release()
1520 1520
1521 1521 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1522 1522 # hack for commands that use a temporary commit (e.g. histedit):
1523 1523 # the temporary commit may have been stripped before the hook runs
1524 1524 if self.changelog.hasnode(ret):
1525 1525 self.hook("commit", node=node, parent1=parent1,
1526 1526 parent2=parent2)
1527 1527 self._afterlock(commithook)
1528 1528 return ret
1529 1529
1530 1530 @unfilteredmethod
1531 1531 def commitctx(self, ctx, error=False):
1532 1532 """Add a new revision to current repository.
1533 1533 Revision information is passed via the context argument.
1534 1534 """
1535 1535
1536 1536 tr = None
1537 1537 p1, p2 = ctx.p1(), ctx.p2()
1538 1538 user = ctx.user()
1539 1539
1540 1540 lock = self.lock()
1541 1541 try:
1542 1542 tr = self.transaction("commit")
1543 1543 trp = weakref.proxy(tr)
1544 1544
1545 1545 if ctx.files():
1546 1546 m1 = p1.manifest()
1547 1547 m2 = p2.manifest()
1548 1548 m = m1.copy()
1549 1549
1550 1550 # check in files
1551 1551 added = []
1552 1552 changed = []
1553 1553 removed = list(ctx.removed())
1554 1554 linkrev = len(self)
1555 1555 self.ui.note(_("committing files:\n"))
1556 1556 for f in sorted(ctx.modified() + ctx.added()):
1557 1557 self.ui.note(f + "\n")
1558 1558 try:
1559 1559 fctx = ctx[f]
1560 1560 if fctx is None:
1561 1561 removed.append(f)
1562 1562 else:
1563 1563 added.append(f)
1564 1564 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1565 1565 trp, changed)
1566 1566 m.setflag(f, fctx.flags())
1567 1567 except OSError, inst:
1568 1568 self.ui.warn(_("trouble committing %s!\n") % f)
1569 1569 raise
1570 1570 except IOError, inst:
1571 1571 errcode = getattr(inst, 'errno', errno.ENOENT)
1572 1572 if error or errcode and errcode != errno.ENOENT:
1573 1573 self.ui.warn(_("trouble committing %s!\n") % f)
1574 1574 raise
1575 1575
1576 1576 # update manifest
1577 1577 self.ui.note(_("committing manifest\n"))
1578 1578 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1579 1579 drop = [f for f in removed if f in m]
1580 1580 for f in drop:
1581 1581 del m[f]
1582 1582 mn = self.manifest.add(m, trp, linkrev,
1583 1583 p1.manifestnode(), p2.manifestnode(),
1584 1584 added, drop)
1585 1585 files = changed + removed
1586 1586 else:
1587 1587 mn = p1.manifestnode()
1588 1588 files = []
1589 1589
1590 1590 # update changelog
1591 1591 self.ui.note(_("committing changelog\n"))
1592 1592 self.changelog.delayupdate(tr)
1593 1593 n = self.changelog.add(mn, files, ctx.description(),
1594 1594 trp, p1.node(), p2.node(),
1595 1595 user, ctx.date(), ctx.extra().copy())
1596 1596 p = lambda: tr.writepending() and self.root or ""
1597 1597 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1598 1598 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1599 1599 parent2=xp2, pending=p)
1600 1600 # set the new commit in its proper phase
1601 1601 targetphase = subrepo.newcommitphase(self.ui, ctx)
1602 1602 if targetphase:
1603 1603 # retracting the boundary does not alter parent changesets.
1604 1604 # if a parent has a higher phase, the resulting phase will
1605 1605 # be compliant anyway
1606 1606 #
1607 1607 # if the minimal phase was 0 we don't need to retract anything
1608 1608 phases.retractboundary(self, tr, targetphase, [n])
1609 1609 tr.close()
1610 1610 branchmap.updatecache(self.filtered('served'))
1611 1611 return n
1612 1612 finally:
1613 1613 if tr:
1614 1614 tr.release()
1615 1615 lock.release()
1616 1616
1617 1617 @unfilteredmethod
1618 1618 def destroying(self):
1619 1619 '''Inform the repository that nodes are about to be destroyed.
1620 1620 Intended for use by strip and rollback, so there's a common
1621 1621 place for anything that has to be done before destroying history.
1622 1622
1623 1623 This is mostly useful for saving state that is in memory and waiting
1624 1624 to be flushed when the current lock is released. Because a call to
1625 1625 destroyed is imminent, the repo will be invalidated causing those
1626 1626 changes to stay in memory (waiting for the next unlock), or vanish
1627 1627 completely.
1628 1628 '''
1629 1629 # When using the same lock to commit and strip, the phasecache is left
1630 1630 # dirty after committing. Then when we strip, the repo is invalidated,
1631 1631 # causing those changes to disappear.
1632 1632 if '_phasecache' in vars(self):
1633 1633 self._phasecache.write()
1634 1634
1635 1635 @unfilteredmethod
1636 1636 def destroyed(self):
1637 1637 '''Inform the repository that nodes have been destroyed.
1638 1638 Intended for use by strip and rollback, so there's a common
1639 1639 place for anything that has to be done after destroying history.
1640 1640 '''
1641 1641 # When one tries to:
1642 1642 # 1) destroy nodes thus calling this method (e.g. strip)
1643 1643 # 2) use phasecache somewhere (e.g. commit)
1644 1644 #
1645 1645 # then 2) will fail because the phasecache contains nodes that were
1646 1646 # removed. We can either remove phasecache from the filecache,
1647 1647 # causing it to reload next time it is accessed, or simply filter
1648 1648 # the removed nodes now and write the updated cache.
1649 1649 self._phasecache.filterunknown(self)
1650 1650 self._phasecache.write()
1651 1651
1652 1652 # update the 'served' branch cache to help read-only server processes
1653 1653 # Thanks to branchcache collaboration this is done from the nearest
1654 1654 # filtered subset and it is expected to be fast.
1655 1655 branchmap.updatecache(self.filtered('served'))
1656 1656
1657 1657 # Ensure the persistent tag cache is updated. Doing it now
1658 1658 # means that the tag cache only has to worry about destroyed
1659 1659 # heads immediately after a strip/rollback. That in turn
1660 1660 # guarantees that "cachetip == currenttip" (comparing both rev
1661 1661 # and node) always means no nodes have been added or destroyed.
1662 1662
1663 1663 # XXX this is suboptimal when qrefresh'ing: we strip the current
1664 1664 # head, refresh the tag cache, then immediately add a new head.
1665 1665 # But I think doing it this way is necessary for the "instant
1666 1666 # tag cache retrieval" case to work.
1667 1667 self.invalidate()
1668 1668
1669 1669 def walk(self, match, node=None):
1670 1670 '''
1671 1671 walk recursively through the directory tree or a given
1672 1672 changeset, finding all files matched by the match
1673 1673 function
1674 1674 '''
1675 1675 return self[node].walk(match)
1676 1676
1677 1677 def status(self, node1='.', node2=None, match=None,
1678 1678 ignored=False, clean=False, unknown=False,
1679 1679 listsubrepos=False):
1680 1680 '''a convenience method that calls node1.status(node2)'''
1681 1681 return self[node1].status(node2, match, ignored, clean, unknown,
1682 1682 listsubrepos)
1683 1683
1684 1684 def heads(self, start=None):
1685 1685 heads = self.changelog.heads(start)
1686 1686 # sort the output in rev descending order
1687 1687 return sorted(heads, key=self.changelog.rev, reverse=True)
1688 1688
1689 1689 def branchheads(self, branch=None, start=None, closed=False):
1690 1690 '''return a (possibly filtered) list of heads for the given branch
1691 1691
1692 1692 Heads are returned in topological order, from newest to oldest.
1693 1693 If branch is None, use the dirstate branch.
1694 1694 If start is not None, return only heads reachable from start.
1695 1695 If closed is True, return heads that are marked as closed as well.
1696 1696 '''
1697 1697 if branch is None:
1698 1698 branch = self[None].branch()
1699 1699 branches = self.branchmap()
1700 1700 if branch not in branches:
1701 1701 return []
1702 1702 # the cache returns heads ordered lowest to highest
1703 1703 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1704 1704 if start is not None:
1705 1705 # filter out the heads that cannot be reached from startrev
1706 1706 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1707 1707 bheads = [h for h in bheads if h in fbheads]
1708 1708 return bheads
1709 1709
1710 1710 def branches(self, nodes):
1711 1711 if not nodes:
1712 1712 nodes = [self.changelog.tip()]
1713 1713 b = []
1714 1714 for n in nodes:
1715 1715 t = n
1716 1716 while True:
1717 1717 p = self.changelog.parents(n)
1718 1718 if p[1] != nullid or p[0] == nullid:
1719 1719 b.append((t, n, p[0], p[1]))
1720 1720 break
1721 1721 n = p[0]
1722 1722 return b
1723 1723
1724 1724 def between(self, pairs):
1725 1725 r = []
1726 1726
1727 1727 for top, bottom in pairs:
1728 1728 n, l, i = top, [], 0
1729 1729 f = 1
1730 1730
1731 1731 while n != bottom and n != nullid:
1732 1732 p = self.changelog.parents(n)[0]
1733 1733 if i == f:
1734 1734 l.append(n)
1735 1735 f = f * 2
1736 1736 n = p
1737 1737 i += 1
1738 1738
1739 1739 r.append(l)
1740 1740
1741 1741 return r
1742 1742
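The loop above samples first-parent ancestors at exponentially growing distances below top (1, 2, 4, 8, ... steps), so a linear chain of N changesets is summarized by O(log N) nodes; this keeps the legacy discovery protocol's 'between' queries small. A standalone sketch of the same sampling, assuming a hypothetical parent(n) callback that returns the first parent (or None at the root):

def sample_between(top, bottom, parent):
    # collect nodes strictly between top and bottom, taken at
    # distances 1, 2, 4, 8, ... below top
    n, l, i, f = top, [], 0, 1
    while n != bottom and n is not None:
        p = parent(n)
        if i == f:       # distance from top hit the next power of two
            l.append(n)
            f *= 2       # next sample is twice as far away
        n = p
        i += 1
    return l

# toy linear history 9 -> 8 -> ... -> 0
assert sample_between(9, 0, lambda n: n - 1 if n else None) == [8, 7, 5, 1]
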
1743 1743 def checkpush(self, pushop):
1744 1744 """Extensions can override this function if additional checks have
1745 1745 to be performed before pushing, or call it if they override push
1746 1746 command.
1747 1747 """
1748 1748 pass
1749 1749
1750 1750 @unfilteredpropertycache
1751 1751 def prepushoutgoinghooks(self):
1752 1752 """Return a util.hooks that consists of "(repo, remote, outgoing)"
1753 1753 functions, which are called before pushing changesets.
1754 1754 """
1755 1755 return util.hooks()
1756 1756
1757 1757 def stream_in(self, remote, remotereqs):
1758 1758 lock = self.lock()
1759 1759 try:
1760 1760 # Save remote branchmap. We will use it later
1761 1761 # to speed up branchcache creation
1762 1762 rbranchmap = None
1763 1763 if remote.capable("branchmap"):
1764 1764 rbranchmap = remote.branchmap()
1765 1765
1766 1766 fp = remote.stream_out()
1767 1767 l = fp.readline()
1768 1768 try:
1769 1769 resp = int(l)
1770 1770 except ValueError:
1771 1771 raise error.ResponseError(
1772 1772 _('unexpected response from remote server:'), l)
1773 1773 if resp == 1:
1774 1774 raise util.Abort(_('operation forbidden by server'))
1775 1775 elif resp == 2:
1776 1776 raise util.Abort(_('locking the remote repository failed'))
1777 1777 elif resp != 0:
1778 1778 raise util.Abort(_('the server sent an unknown error code'))
1779 1779 self.ui.status(_('streaming all changes\n'))
1780 1780 l = fp.readline()
1781 1781 try:
1782 1782 total_files, total_bytes = map(int, l.split(' ', 1))
1783 1783 except (ValueError, TypeError):
1784 1784 raise error.ResponseError(
1785 1785 _('unexpected response from remote server:'), l)
1786 1786 self.ui.status(_('%d files to transfer, %s of data\n') %
1787 1787 (total_files, util.bytecount(total_bytes)))
1788 1788 handled_bytes = 0
1789 1789 self.ui.progress(_('clone'), 0, total=total_bytes)
1790 1790 start = time.time()
1791 1791
1792 1792 tr = self.transaction(_('clone'))
1793 1793 try:
1794 1794 for i in xrange(total_files):
1795 1795 # XXX doesn't support '\n' or '\r' in filenames
1796 1796 l = fp.readline()
1797 1797 try:
1798 1798 name, size = l.split('\0', 1)
1799 1799 size = int(size)
1800 1800 except (ValueError, TypeError):
1801 1801 raise error.ResponseError(
1802 1802 _('unexpected response from remote server:'), l)
1803 1803 if self.ui.debugflag:
1804 1804 self.ui.debug('adding %s (%s)\n' %
1805 1805 (name, util.bytecount(size)))
1806 1806 # for backwards compat, name was partially encoded
1807 1807 ofp = self.svfs(store.decodedir(name), 'w')
1808 1808 for chunk in util.filechunkiter(fp, limit=size):
1809 1809 handled_bytes += len(chunk)
1810 1810 self.ui.progress(_('clone'), handled_bytes,
1811 1811 total=total_bytes)
1812 1812 ofp.write(chunk)
1813 1813 ofp.close()
1814 1814 tr.close()
1815 1815 finally:
1816 1816 tr.release()
1817 1817
1818 1818 # Writing straight to files circumvented the in-memory caches
1819 1819 self.invalidate()
1820 1820
1821 1821 elapsed = time.time() - start
1822 1822 if elapsed <= 0:
1823 1823 elapsed = 0.001
1824 1824 self.ui.progress(_('clone'), None)
1825 1825 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1826 1826 (util.bytecount(total_bytes), elapsed,
1827 1827 util.bytecount(total_bytes / elapsed)))
1828 1828
1829 1829 # new requirements = old non-format requirements +
1830 1830 # new format-related remote requirements
1831 1831 # requirements from the streamed-in repository
1832 1832 self.requirements = remotereqs | (
1833 1833 self.requirements - self.supportedformats)
1834 1834 self._applyopenerreqs()
1835 1835 self._writerequirements()
1836 1836
1837 1837 if rbranchmap:
1838 1838 rbheads = []
1839 1839 closed = []
1840 1840 for bheads in rbranchmap.itervalues():
1841 1841 rbheads.extend(bheads)
1842 1842 for h in bheads:
1843 1843 r = self.changelog.rev(h)
1844 1844 b, c = self.changelog.branchinfo(r)
1845 1845 if c:
1846 1846 closed.append(h)
1847 1847
1848 1848 if rbheads:
1849 1849 rtiprev = max((int(self.changelog.rev(node))
1850 1850 for node in rbheads))
1851 1851 cache = branchmap.branchcache(rbranchmap,
1852 1852 self[rtiprev].node(),
1853 1853 rtiprev,
1854 1854 closednodes=closed)
1855 1855 # Try to stick it as low as possible;
1856 1856 # filters above 'served' are unlikely to be fetched from a clone
1857 1857 for candidate in ('base', 'immutable', 'served'):
1858 1858 rview = self.filtered(candidate)
1859 1859 if cache.validfor(rview):
1860 1860 self._branchcaches[candidate] = cache
1861 1861 cache.write(rview)
1862 1862 break
1863 1863 self.invalidate()
1864 1864 return len(self.heads()) + 1
1865 1865 finally:
1866 1866 lock.release()
1867 1867
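For reference, the stream_out payload consumed above is line-framed: a status line ('0' success, '1' operation forbidden, '2' remote locking failed), then a '<file count> <byte count>' summary line, then per file a '<store path>\0<size>' header followed by exactly size bytes of raw store data. A minimal reader for that framing, as a sketch rather than Mercurial API (fp is any binary file-like object; Python 2 idiom to match this code):

def read_stream_payload(fp):
    resp = int(fp.readline())           # status line
    if resp != 0:
        raise ValueError('stream clone refused: code %d' % resp)
    total_files, total_bytes = map(int, fp.readline().split(' ', 1))
    for _ in xrange(total_files):
        # per-file header: "<name>\0<size>\n"
        name, size = fp.readline().split('\0', 1)
        yield name, fp.read(int(size))  # raw revlog/store bytes

Each yielded name is still store-encoded, which is why the code above passes it through store.decodedir() before opening the output file.
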
1868 1868 def clone(self, remote, heads=[], stream=None):
1869 1869 '''clone remote repository.
1870 1870
1871 1871 keyword arguments:
1872 1872 heads: list of revs to clone (forces use of pull)
1873 1873 stream: use streaming clone if possible'''
1874 1874
1875 1875 # now, all clients that can request uncompressed clones can
1876 1876 # read repo formats supported by all servers that can serve
1877 1877 # them.
1878 1878
1879 1879 # if revlog format changes, client will have to check version
1880 1880 # and format flags on "stream" capability, and use
1881 1881 # uncompressed only if compatible.
1882 1882
1883 1883 if stream is None:
1884 1884 # if the server explicitly prefers to stream (for fast LANs)
1885 1885 stream = remote.capable('stream-preferred')
1886 1886
1887 1887 if stream and not heads:
1888 1888 # 'stream' means remote revlog format is revlogv1 only
1889 1889 if remote.capable('stream'):
1890 1890 self.stream_in(remote, set(('revlogv1',)))
1891 1891 else:
1892 1892 # otherwise, 'streamreqs' contains the remote revlog format
1893 1893 streamreqs = remote.capable('streamreqs')
1894 1894 if streamreqs:
1895 1895 streamreqs = set(streamreqs.split(','))
1896 1896 # if we support it, stream in and adjust our requirements
1897 1897 if not streamreqs - self.supportedformats:
1898 1898 self.stream_in(remote, streamreqs)
1899 1899
1900 1900 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1901 1901 try:
1902 1902 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1903 1903 ret = exchange.pull(self, remote, heads).cgresult
1904 1904 finally:
1905 1905 self.ui.restoreconfig(quiet)
1906 1906 return ret
1907 1907
1908 1908 def pushkey(self, namespace, key, old, new):
1909 1909 try:
1910 1910 tr = self.currenttransaction()
1911 1911 hookargs = {}
1912 1912 if tr is not None:
1913 1913 hookargs.update(tr.hookargs)
1914 1914 pending = lambda: tr.writepending() and self.root or ""
1915 1915 hookargs['pending'] = pending
1916 1916 hookargs['namespace'] = namespace
1917 1917 hookargs['key'] = key
1918 1918 hookargs['old'] = old
1919 1919 hookargs['new'] = new
1920 1920 self.hook('prepushkey', throw=True, **hookargs)
1921 1921 except error.HookAbort, exc:
1922 1922 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1923 1923 if exc.hint:
1924 1924 self.ui.write_err(_("(%s)\n") % exc.hint)
1925 1925 return False
1926 1926 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1927 1927 ret = pushkey.push(self, namespace, key, old, new)
1928 1928 def runhook():
1929 1929 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1930 1930 ret=ret)
1931 1931 self._afterlock(runhook)
1932 1932 return ret
1933 1933
1934 1934 def listkeys(self, namespace):
1935 1935 self.hook('prelistkeys', throw=True, namespace=namespace)
1936 1936 self.ui.debug('listing keys for "%s"\n' % namespace)
1937 1937 values = pushkey.list(self, namespace)
1938 1938 self.hook('listkeys', namespace=namespace, values=values)
1939 1939 return values
1940 1940
1941 1941 def debugwireargs(self, one, two, three=None, four=None, five=None):
1942 1942 '''used to test argument passing over the wire'''
1943 1943 return "%s %s %s %s %s" % (one, two, three, four, five)
1944 1944
1945 1945 def savecommitmessage(self, text):
1946 1946 fp = self.vfs('last-message.txt', 'wb')
1947 1947 try:
1948 1948 fp.write(text)
1949 1949 finally:
1950 1950 fp.close()
1951 1951 return self.pathto(fp.name[len(self.root) + 1:])
1952 1952
1953 1953 # used to avoid circular references so destructors work
1954 1954 def aftertrans(files):
1955 1955 renamefiles = [tuple(t) for t in files]
1956 1956 def a():
1957 1957 for vfs, src, dest in renamefiles:
1958 1958 try:
1959 1959 vfs.rename(src, dest)
1960 1960 except OSError: # journal file does not yet exist
1961 1961 pass
1962 1962 return a
1963 1963
1964 1964 def undoname(fn):
1965 1965 base, name = os.path.split(fn)
1966 1966 assert name.startswith('journal')
1967 1967 return os.path.join(base, name.replace('journal', 'undo', 1))
1968 1968
1969 1969 def instance(ui, path, create):
1970 1970 return localrepository(ui, util.urllocalpath(path), create)
1971 1971
1972 1972 def islocal(path):
1973 1973 return True
@@ -1,946 +1,961 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util
10 10 import array, struct
11 11 import os
12 12
13 13 propertycache = util.propertycache
14 14
15 15 def _parsev1(data):
16 16 # This method does a little bit of excessive-looking
17 17 # precondition checking. This is so that the behavior of this
18 18 # class exactly matches its C counterpart to try and help
19 19 # prevent surprise breakage for anyone that develops against
20 20 # the pure version.
21 21 if data and data[-1] != '\n':
22 22 raise ValueError('Manifest did not end in a newline.')
23 23 prev = None
24 24 for l in data.splitlines():
25 25 if prev is not None and prev > l:
26 26 raise ValueError('Manifest lines not in sorted order.')
27 27 prev = l
28 28 f, n = l.split('\0')
29 29 if len(n) > 40:
30 30 yield f, revlog.bin(n[:40]), n[40:]
31 31 else:
32 32 yield f, revlog.bin(n), ''
33 33
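Concretely, each v1 line is '<path>\0<40 hex digits><flag>\n', with lines sorted by path and the optional flag a single character ('l' symlink, 'x' executable). A round trip of one entry; revlog.bin is hex decoding, so binascii stands in for it here:

import binascii

line = "foo/bar.py\x00" + "a1" * 20 + "x"   # one entry, executable flag
path, rest = line.split("\x00")
node = binascii.unhexlify(rest[:40])        # 20-byte binary nodeid
flag = rest[40:]                            # '', 'l' or 'x'
assert (path, flag, len(node)) == ("foo/bar.py", "x", 20)
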
34 34 def _parsev2(data):
35 35 metadataend = data.find('\n')
36 36 # Just ignore metadata for now
37 37 pos = metadataend + 1
38 38 prevf = ''
39 39 while pos < len(data):
40 40 end = data.find('\n', pos + 1) # +1 to skip stem length byte
41 41 if end == -1:
42 42 raise ValueError('Manifest ended with incomplete file entry.')
43 43 stemlen = ord(data[pos])
44 44 items = data[pos + 1:end].split('\0')
45 45 f = prevf[:stemlen] + items[0]
46 46 if prevf > f:
47 47 raise ValueError('Manifest entries not in sorted order.')
48 48 fl = items[1]
49 49 # Just ignore metadata (items[2:] for now)
50 50 n = data[end + 1:end + 21]
51 51 yield f, n, fl
52 52 pos = end + 22
53 53 prevf = f
54 54
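So v2 front-compresses the sorted paths: a single stem-length byte says how many leading bytes to reuse from the previous path, followed by the differing suffix, the flag, and a fixed 20-byte binary node (hence the 'end + 22' skip: the node plus its trailing newline). The stem arithmetic in isolation:

import os

prev, cur = "src/alpha.py", "src/beta.py"
stem = os.path.commonprefix([prev, cur])    # "src/"
stemlen = min(len(stem), 255)               # must fit in one byte
entry = chr(stemlen) + cur[stemlen:]        # "\x04beta.py"
# a reader reverses it exactly as _parsev2 does:
assert prev[:ord(entry[0])] + entry[1:] == cur
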
55 55 def _parse(data):
56 56 """Generates (path, node, flags) tuples from a manifest text"""
57 57 if data.startswith('\0'):
58 58 return iter(_parsev2(data))
59 59 else:
60 60 return iter(_parsev1(data))
61 61
62 62 def _text(it, usemanifestv2):
63 63 """Given an iterator over (path, node, flags) tuples, returns a manifest
64 64 text"""
65 65 if usemanifestv2:
66 66 return _textv2(it)
67 67 else:
68 68 return _textv1(it)
69 69
70 70 def _textv1(it):
71 71 files = []
72 72 lines = []
73 73 _hex = revlog.hex
74 74 for f, n, fl in it:
75 75 files.append(f)
76 76 # if this is changed to support newlines in filenames,
77 77 # be sure to check the templates/ dir again (especially *-raw.tmpl)
78 78 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
79 79
80 80 _checkforbidden(files)
81 81 return ''.join(lines)
82 82
83 83 def _textv2(it):
84 84 files = []
85 85 lines = ['\0\n']
86 86 prevf = ''
87 87 for f, n, fl in it:
88 88 files.append(f)
89 89 stem = os.path.commonprefix([prevf, f])
90 90 stemlen = min(len(stem), 255)
91 91 lines.append("%c%s\0%s\n%s\n" % (stemlen, f[stemlen:], fl, n))
92 92 prevf = f
93 93 _checkforbidden(files)
94 94 return ''.join(lines)
95 95
96 96 class _lazymanifest(dict):
97 97 """This is the pure implementation of lazymanifest.
98 98
99 99 It has not been optimized *at all* and is not lazy.
100 100 """
101 101
102 102 def __init__(self, data):
103 103 dict.__init__(self)
104 104 for f, n, fl in _parse(data):
105 105 self[f] = n, fl
106 106
107 107 def __setitem__(self, k, v):
108 108 node, flag = v
109 109 assert node is not None
110 110 if len(node) > 21:
111 111 node = node[:21] # match c implementation behavior
112 112 dict.__setitem__(self, k, (node, flag))
113 113
114 114 def __iter__(self):
115 115 return iter(sorted(dict.keys(self)))
116 116
117 117 def iterkeys(self):
118 118 return iter(sorted(dict.keys(self)))
119 119
120 120 def iterentries(self):
121 121 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
122 122
123 123 def copy(self):
124 124 c = _lazymanifest('')
125 125 c.update(self)
126 126 return c
127 127
128 128 def diff(self, m2, clean=False):
129 129 '''Finds changes between the current manifest and m2.'''
130 130 diff = {}
131 131
132 132 for fn, e1 in self.iteritems():
133 133 if fn not in m2:
134 134 diff[fn] = e1, (None, '')
135 135 else:
136 136 e2 = m2[fn]
137 137 if e1 != e2:
138 138 diff[fn] = e1, e2
139 139 elif clean:
140 140 diff[fn] = None
141 141
142 142 for fn, e2 in m2.iteritems():
143 143 if fn not in self:
144 144 diff[fn] = (None, ''), e2
145 145
146 146 return diff
147 147
148 148 def filtercopy(self, filterfn):
149 149 c = _lazymanifest('')
150 150 for f, n, fl in self.iterentries():
151 151 if filterfn(f):
152 152 c[f] = n, fl
153 153 return c
154 154
155 155 def text(self):
156 156 """Get the full data of this manifest as a bytestring."""
157 157 return _textv1(self.iterentries())
158 158
159 159 try:
160 160 _lazymanifest = parsers.lazymanifest
161 161 except AttributeError:
162 162 pass
163 163
164 164 class manifestdict(object):
165 165 def __init__(self, data=''):
166 166 if data.startswith('\0'):
167 167 #_lazymanifest can not parse v2
168 168 self._lm = _lazymanifest('')
169 169 for f, n, fl in _parsev2(data):
170 170 self._lm[f] = n, fl
171 171 else:
172 172 self._lm = _lazymanifest(data)
173 173
174 174 def __getitem__(self, key):
175 175 return self._lm[key][0]
176 176
177 177 def find(self, key):
178 178 return self._lm[key]
179 179
180 180 def __len__(self):
181 181 return len(self._lm)
182 182
183 183 def __setitem__(self, key, node):
184 184 self._lm[key] = node, self.flags(key, '')
185 185
186 186 def __contains__(self, key):
187 187 return key in self._lm
188 188
189 189 def __delitem__(self, key):
190 190 del self._lm[key]
191 191
192 192 def __iter__(self):
193 193 return self._lm.__iter__()
194 194
195 195 def iterkeys(self):
196 196 return self._lm.iterkeys()
197 197
198 198 def keys(self):
199 199 return list(self.iterkeys())
200 200
201 201 def filesnotin(self, m2):
202 202 '''Set of files in this manifest that are not in the other'''
203 203 files = set(self)
204 204 files.difference_update(m2)
205 205 return files
206 206
207 207 @propertycache
208 208 def _dirs(self):
209 209 return util.dirs(self)
210 210
211 211 def dirs(self):
212 212 return self._dirs
213 213
214 214 def hasdir(self, dir):
215 215 return dir in self._dirs
216 216
217 217 def _filesfastpath(self, match):
218 218 '''Checks whether we can correctly and quickly iterate over matcher
219 219 files instead of over manifest files.'''
220 220 files = match.files()
221 221 return (len(files) < 100 and (match.isexact() or
222 222 (not match.anypats() and all(fn in self for fn in files))))
223 223
224 224 def walk(self, match):
225 225 '''Generates matching file names.
226 226
227 227 Equivalent to manifest.matches(match).iterkeys(), but without creating
228 228 an entirely new manifest.
229 229
230 230 It also reports nonexistent files by marking them bad with match.bad().
231 231 '''
232 232 if match.always():
233 233 for f in iter(self):
234 234 yield f
235 235 return
236 236
237 237 fset = set(match.files())
238 238
239 239 # avoid the entire walk if we're only looking for specific files
240 240 if self._filesfastpath(match):
241 241 for fn in sorted(fset):
242 242 yield fn
243 243 return
244 244
245 245 for fn in self:
246 246 if fn in fset:
247 247 # specified pattern is the exact name
248 248 fset.remove(fn)
249 249 if match(fn):
250 250 yield fn
251 251
252 252 # for dirstate.walk, files=['.'] means "walk the whole tree".
253 253 # follow that here, too
254 254 fset.discard('.')
255 255
256 256 for fn in sorted(fset):
257 257 if not self.hasdir(fn):
258 258 match.bad(fn, None)
259 259
260 260 def matches(self, match):
261 261 '''generate a new manifest filtered by the match argument'''
262 262 if match.always():
263 263 return self.copy()
264 264
265 265 if self._filesfastpath(match):
266 266 m = manifestdict()
267 267 lm = self._lm
268 268 for fn in match.files():
269 269 if fn in lm:
270 270 m._lm[fn] = lm[fn]
271 271 return m
272 272
273 273 m = manifestdict()
274 274 m._lm = self._lm.filtercopy(match)
275 275 return m
276 276
277 277 def diff(self, m2, clean=False):
278 278 '''Finds changes between the current manifest and m2.
279 279
280 280 Args:
281 281 m2: the manifest to which this manifest should be compared.
282 282 clean: if true, include files unchanged between these manifests
283 283 with a None value in the returned dictionary.
284 284
285 285 The result is returned as a dict with filename as key and
286 286 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
287 287 nodeid in the current/other manifest and fl1/fl2 is the flag
288 288 in the current/other manifest. Where the file does not exist,
289 289 the nodeid will be None and the flags will be the empty
290 290 string.
291 291 '''
292 292 return self._lm.diff(m2._lm, clean)
293 293
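A small usage sketch of that result shape, using the classes in this module and made-up 20-byte nodes (values purely illustrative):

n1, n2 = "\x11" * 20, "\x22" * 20

m1 = manifestdict()
m1["a.txt"] = n1
m2 = m1.copy()
m2["a.txt"] = n2
m2.setflag("a.txt", "x")
m2["b.txt"] = n1

d = m1.diff(m2)
assert d["a.txt"] == ((n1, ""), (n2, "x"))   # node and flag both changed
assert d["b.txt"] == ((None, ""), (n1, ""))  # present only in m2
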
294 294 def setflag(self, key, flag):
295 295 self._lm[key] = self[key], flag
296 296
297 297 def get(self, key, default=None):
298 298 try:
299 299 return self._lm[key][0]
300 300 except KeyError:
301 301 return default
302 302
303 303 def flags(self, key, default=''):
304 304 try:
305 305 return self._lm[key][1]
306 306 except KeyError:
307 307 return default
308 308
309 309 def copy(self):
310 310 c = manifestdict()
311 311 c._lm = self._lm.copy()
312 312 return c
313 313
314 314 def iteritems(self):
315 315 return (x[:2] for x in self._lm.iterentries())
316 316
317 317 def text(self, usemanifestv2=False):
318 318 if usemanifestv2:
319 319 return _textv2(self._lm.iterentries())
320 320 else:
321 321 # use (probably) native version for v1
322 322 return self._lm.text()
323 323
324 324 def fastdelta(self, base, changes):
325 325 """Given a base manifest text as an array.array and a list of changes
326 326 relative to that text, compute a delta that can be used by revlog.
327 327 """
328 328 delta = []
329 329 dstart = None
330 330 dend = None
331 331 dline = [""]
332 332 start = 0
333 333 # zero copy representation of base as a buffer
334 334 addbuf = util.buffer(base)
335 335
336 336 # start with a readonly loop that finds the offset of
337 337 # each line and creates the deltas
338 338 for f, todelete in changes:
339 339 # (start, end) will either bound the item or give the insert point
340 340 start, end = _msearch(addbuf, f, start)
341 341 if not todelete:
342 342 h, fl = self._lm[f]
343 343 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
344 344 else:
345 345 if start == end:
346 346 # item we want to delete was not found, error out
347 347 raise AssertionError(
348 348 _("failed to remove %s from manifest") % f)
349 349 l = ""
350 350 if dstart is not None and dstart <= start and dend >= start:
351 351 if dend < end:
352 352 dend = end
353 353 if l:
354 354 dline.append(l)
355 355 else:
356 356 if dstart is not None:
357 357 delta.append([dstart, dend, "".join(dline)])
358 358 dstart = start
359 359 dend = end
360 360 dline = [l]
361 361
362 362 if dstart is not None:
363 363 delta.append([dstart, dend, "".join(dline)])
364 364 # apply the delta to the base, and get a delta for addrevision
365 365 deltatext, arraytext = _addlistdelta(base, delta)
366 366 return arraytext, deltatext
367 367
368 368 def _msearch(m, s, lo=0, hi=None):
369 369 '''return a tuple (start, end) that says where to find s within m.
370 370
371 371 If the string is found m[start:end] are the line containing
372 372 that string. If start == end the string was not found and
373 373 they indicate the proper sorted insertion point.
374 374
375 375 m should be a buffer or a string
376 376 s is a string'''
377 377 def advance(i, c):
378 378 while i < lenm and m[i] != c:
379 379 i += 1
380 380 return i
381 381 if not s:
382 382 return (lo, lo)
383 383 lenm = len(m)
384 384 if not hi:
385 385 hi = lenm
386 386 while lo < hi:
387 387 mid = (lo + hi) // 2
388 388 start = mid
389 389 while start > 0 and m[start - 1] != '\n':
390 390 start -= 1
391 391 end = advance(start, '\0')
392 392 if m[start:end] < s:
393 393 # we know that after the null there are 40 bytes of sha1
394 394 # this translates to the bisect lo = mid + 1
395 395 lo = advance(end + 40, '\n') + 1
396 396 else:
397 397 # this translates to the bisect hi = mid
398 398 hi = start
399 399 end = advance(lo, '\0')
400 400 found = m[lo:end]
401 401 if s == found:
402 402 # we know that after the null there are 40 bytes of sha1
403 403 end = advance(end + 40, '\n')
404 404 return (lo, end + 1)
405 405 else:
406 406 return (lo, lo)
407 407
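A quick check of that contract on a two-line v1 text (each line is 1 + 1 + 40 + 1 = 43 bytes; nodes spelled as repeated digits for legibility):

m = "a\x00" + "1" * 40 + "\n" + "c\x00" + "2" * 40 + "\n"
assert _msearch(m, "a") == (0, 43)    # found: bounds of the whole line
assert _msearch(m, "b") == (43, 43)   # absent: sorted insertion point
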
408 408 def _checkforbidden(l):
409 409 """Check filenames for illegal characters."""
410 410 for f in l:
411 411 if '\n' in f or '\r' in f:
412 412 raise error.RevlogError(
413 413 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
414 414
415 415
416 416 # apply the changes collected during the bisect loop to our addlist
417 417 # return a delta suitable for addrevision
418 418 def _addlistdelta(addlist, x):
419 419 # for large addlist arrays, building a new array is cheaper
420 420 # than repeatedly modifying the existing one
421 421 currentposition = 0
422 422 newaddlist = array.array('c')
423 423
424 424 for start, end, content in x:
425 425 newaddlist += addlist[currentposition:start]
426 426 if content:
427 427 newaddlist += array.array('c', content)
428 428
429 429 currentposition = end
430 430
431 431 newaddlist += addlist[currentposition:]
432 432
433 433 deltatext = "".join(struct.pack(">lll", start, end, len(content))
434 434 + content for start, end, content in x)
435 435 return deltatext, newaddlist
436 436
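The deltatext assembled here is the revlog/bdiff delta encoding: a sequence of records, each a big-endian (start, end, length) 12-byte header followed by length bytes that replace base[start:end], with records sorted by start. Applying one by hand, as a sketch of what mdiff.patch does (base as a plain string):

import struct

def applydelta(base, deltatext):
    out, cur, pos = [], 0, 0
    while pos < len(deltatext):
        start, end, l = struct.unpack(">lll", deltatext[pos:pos + 12])
        out.append(base[cur:start])                   # unchanged run
        out.append(deltatext[pos + 12:pos + 12 + l])  # replacement bytes
        cur = end                                     # skip replaced span
        pos += 12 + l
    out.append(base[cur:])
    return "".join(out)
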
437 437 def _splittopdir(f):
438 438 if '/' in f:
439 439 dir, subpath = f.split('/', 1)
440 440 return dir + '/', subpath
441 441 else:
442 442 return '', f
443 443
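This one-level split drives all the recursion below: each treemanifest method peels a single leading directory and delegates to the child manifest for the rest of the path. For instance:

assert _splittopdir("foo/bar/baz.py") == ("foo/", "bar/baz.py")
assert _splittopdir("baz.py") == ("", "baz.py")
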
444 444 class treemanifest(object):
445 445 def __init__(self, dir='', text=''):
446 446 self._dir = dir
447 447 self._node = revlog.nullid
448 448 self._dirs = {}
449 449 # Using _lazymanifest here is a little slower than plain old dicts
450 450 self._files = {}
451 451 self._flags = {}
452 452 def readsubtree(subdir, subm):
453 453 raise AssertionError('treemanifest constructor only accepts '
454 454 'flat manifests')
455 455 self.parse(text, readsubtree)
456 456
457 457 def _subpath(self, path):
458 458 return self._dir + path
459 459
460 460 def __len__(self):
461 461 size = len(self._files)
462 462 for m in self._dirs.values():
463 463 size += m.__len__()
464 464 return size
465 465
466 466 def _isempty(self):
467 467 return (not self._files and (not self._dirs or
468 468 all(m._isempty() for m in self._dirs.values())))
469 469
470 470 def __str__(self):
471 471 return ('<treemanifest dir=%s, node=%s>' %
472 472 (self._dir, revlog.hex(self._node)))
473 473
474 474 def dir(self):
475 475 '''The directory that this tree manifest represents, including a
476 476 trailing '/'. Empty string for the repo root directory.'''
477 477 return self._dir
478 478
479 479 def node(self):
480 480 '''The node of this instance. nullid for unsaved instances. Should
481 481 be updated when the instance is read or written from a revlog.
482 482 '''
483 483 return self._node
484 484
485 485 def setnode(self, node):
486 486 self._node = node
487 487
488 488 def iteritems(self):
489 489 for p, n in sorted(self._dirs.items() + self._files.items()):
490 490 if p in self._files:
491 491 yield self._subpath(p), n
492 492 else:
493 493 for f, sn in n.iteritems():
494 494 yield f, sn
495 495
496 496 def iterkeys(self):
497 497 for p in sorted(self._dirs.keys() + self._files.keys()):
498 498 if p in self._files:
499 499 yield self._subpath(p)
500 500 else:
501 501 for f in self._dirs[p].iterkeys():
502 502 yield f
503 503
504 504 def keys(self):
505 505 return list(self.iterkeys())
506 506
507 507 def __iter__(self):
508 508 return self.iterkeys()
509 509
510 510 def __contains__(self, f):
511 511 if f is None:
512 512 return False
513 513 dir, subpath = _splittopdir(f)
514 514 if dir:
515 515 if dir not in self._dirs:
516 516 return False
517 517 return self._dirs[dir].__contains__(subpath)
518 518 else:
519 519 return f in self._files
520 520
521 521 def get(self, f, default=None):
522 522 dir, subpath = _splittopdir(f)
523 523 if dir:
524 524 if dir not in self._dirs:
525 525 return default
526 526 return self._dirs[dir].get(subpath, default)
527 527 else:
528 528 return self._files.get(f, default)
529 529
530 530 def __getitem__(self, f):
531 531 dir, subpath = _splittopdir(f)
532 532 if dir:
533 533 return self._dirs[dir].__getitem__(subpath)
534 534 else:
535 535 return self._files[f]
536 536
537 537 def flags(self, f):
538 538 dir, subpath = _splittopdir(f)
539 539 if dir:
540 540 if dir not in self._dirs:
541 541 return ''
542 542 return self._dirs[dir].flags(subpath)
543 543 else:
544 544 if f in self._dirs:
545 545 return ''
546 546 return self._flags.get(f, '')
547 547
548 548 def find(self, f):
549 549 dir, subpath = _splittopdir(f)
550 550 if dir:
551 551 return self._dirs[dir].find(subpath)
552 552 else:
553 553 return self._files[f], self._flags.get(f, '')
554 554
555 555 def __delitem__(self, f):
556 556 dir, subpath = _splittopdir(f)
557 557 if dir:
558 558 self._dirs[dir].__delitem__(subpath)
559 559 # If the directory is now empty, remove it
560 560 if self._dirs[dir]._isempty():
561 561 del self._dirs[dir]
562 562 else:
563 563 del self._files[f]
564 564 if f in self._flags:
565 565 del self._flags[f]
566 566
567 567 def __setitem__(self, f, n):
568 568 assert n is not None
569 569 dir, subpath = _splittopdir(f)
570 570 if dir:
571 571 if dir not in self._dirs:
572 572 self._dirs[dir] = treemanifest(self._subpath(dir))
573 573 self._dirs[dir].__setitem__(subpath, n)
574 574 else:
575 575 self._files[f] = n[:21] # to match manifestdict's behavior
576 576
577 577 def setflag(self, f, flags):
578 578 """Set the flags (symlink, executable) for path f."""
579 579 assert 'd' not in flags
580 580 dir, subpath = _splittopdir(f)
581 581 if dir:
582 582 if dir not in self._dirs:
583 583 self._dirs[dir] = treemanifest(self._subpath(dir))
584 584 self._dirs[dir].setflag(subpath, flags)
585 585 else:
586 586 self._flags[f] = flags
587 587
588 588 def copy(self):
589 589 copy = treemanifest(self._dir)
590 590 copy._node = self._node
591 591 for d in self._dirs:
592 592 copy._dirs[d] = self._dirs[d].copy()
593 593 copy._files = dict.copy(self._files)
594 594 copy._flags = dict.copy(self._flags)
595 595 return copy
596 596
597 597 def filesnotin(self, m2):
598 598 '''Set of files in this manifest that are not in the other'''
599 599 files = set()
600 600 def _filesnotin(t1, t2):
601 601 for d, m1 in t1._dirs.iteritems():
602 602 if d in t2._dirs:
603 603 m2 = t2._dirs[d]
604 604 _filesnotin(m1, m2)
605 605 else:
606 606 files.update(m1.iterkeys())
607 607
608 608 for fn in t1._files.iterkeys():
609 609 if fn not in t2._files:
610 610 files.add(t1._subpath(fn))
611 611
612 612 _filesnotin(self, m2)
613 613 return files
614 614
615 615 @propertycache
616 616 def _alldirs(self):
617 617 return util.dirs(self)
618 618
619 619 def dirs(self):
620 620 return self._alldirs
621 621
622 622 def hasdir(self, dir):
623 623 topdir, subdir = _splittopdir(dir)
624 624 if topdir:
625 625 if topdir in self._dirs:
626 626 return self._dirs[topdir].hasdir(subdir)
627 627 return False
628 628 return (dir + '/') in self._dirs
629 629
630 630 def walk(self, match):
631 631 '''Generates matching file names.
632 632
633 633 Equivalent to manifest.matches(match).iterkeys(), but without creating
634 634 an entirely new manifest.
635 635
636 636 It also reports nonexistent files by marking them bad with match.bad().
637 637 '''
638 638 if match.always():
639 639 for f in iter(self):
640 640 yield f
641 641 return
642 642
643 643 fset = set(match.files())
644 644
645 645 for fn in self._walk(match):
646 646 if fn in fset:
647 647 # specified pattern is the exact name
648 648 fset.remove(fn)
649 649 yield fn
650 650
651 651 # for dirstate.walk, files=['.'] means "walk the whole tree".
652 652 # follow that here, too
653 653 fset.discard('.')
654 654
655 655 for fn in sorted(fset):
656 656 if not self.hasdir(fn):
657 657 match.bad(fn, None)
658 658
659 659 def _walk(self, match, alldirs=False):
660 660 '''Recursively generates matching file names for walk().
661 661
662 662 Will visit all subdirectories if alldirs is True, otherwise it will
663 663 only visit subdirectories for which match.visitdir is True.'''
664 664
665 665 if not alldirs:
666 666 # slice off the trailing slash ('.' for the repo root)
667 667 visit = match.visitdir(self._dir[:-1] or '.')
668 668 if not visit:
669 669 return
670 670 alldirs = (visit == 'all')
671 671
672 672 # yield this dir's files and walk its submanifests
673 673 for p in sorted(self._dirs.keys() + self._files.keys()):
674 674 if p in self._files:
675 675 fullp = self._subpath(p)
676 676 if match(fullp):
677 677 yield fullp
678 678 else:
679 679 for f in self._dirs[p]._walk(match, alldirs):
680 680 yield f
681 681
682 682 def matches(self, match):
683 683 '''generate a new manifest filtered by the match argument'''
684 684 if match.always():
685 685 return self.copy()
686 686
687 687 return self._matches(match)
688 688
689 689 def _matches(self, match, alldirs=False):
690 690 '''recursively generate a new manifest filtered by the match argument.
691 691
692 692 Will visit all subdirectories if alldirs is True, otherwise it will
693 693 only visit subdirectories for which match.visitdir is True.'''
694 694
695 695 ret = treemanifest(self._dir)
696 696 if not alldirs:
697 697 # slice off the trailing slash ('.' for the repo root)
698 698 visit = match.visitdir(self._dir[:-1] or '.')
699 699 if not visit:
700 700 return ret
701 701 alldirs = (visit == 'all')
702 702
703 703 for fn in self._files:
704 704 fullp = self._subpath(fn)
705 705 if not match(fullp):
706 706 continue
707 707 ret._files[fn] = self._files[fn]
708 708 if fn in self._flags:
709 709 ret._flags[fn] = self._flags[fn]
710 710
711 711 for dir, subm in self._dirs.iteritems():
712 712 m = subm._matches(match, alldirs)
713 713 if not m._isempty():
714 714 ret._dirs[dir] = m
715 715
716 716 return ret
717 717
718 718 def diff(self, m2, clean=False):
719 719 '''Finds changes between the current manifest and m2.
720 720
721 721 Args:
722 722 m2: the manifest to which this manifest should be compared.
723 723 clean: if true, include files unchanged between these manifests
724 724 with a None value in the returned dictionary.
725 725
726 726 The result is returned as a dict with filename as key and
727 727 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
728 728 nodeid in the current/other manifest and fl1/fl2 is the flag
729 729 in the current/other manifest. Where the file does not exist,
730 730 the nodeid will be None and the flags will be the empty
731 731 string.
732 732 '''
733 733 result = {}
734 734 emptytree = treemanifest()
735 735 def _diff(t1, t2):
736 736 for d, m1 in t1._dirs.iteritems():
737 737 m2 = t2._dirs.get(d, emptytree)
738 738 _diff(m1, m2)
739 739
740 740 for d, m2 in t2._dirs.iteritems():
741 741 if d not in t1._dirs:
742 742 _diff(emptytree, m2)
743 743
744 744 for fn, n1 in t1._files.iteritems():
745 745 fl1 = t1._flags.get(fn, '')
746 746 n2 = t2._files.get(fn, None)
747 747 fl2 = t2._flags.get(fn, '')
748 748 if n1 != n2 or fl1 != fl2:
749 749 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
750 750 elif clean:
751 751 result[t1._subpath(fn)] = None
752 752
753 753 for fn, n2 in t2._files.iteritems():
754 754 if fn not in t1._files:
755 755 fl2 = t2._flags.get(fn, '')
756 756 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
757 757
758 758 _diff(self, m2)
759 759 return result
760 760
761 761 def parse(self, text, readsubtree):
762 762 for f, n, fl in _parse(text):
763 763 if fl == 'd':
764 764 f = f + '/'
765 765 self._dirs[f] = readsubtree(self._subpath(f), n)
766 766 else:
767 767 # Use __setitem__ and setflag rather than assigning directly
768 768 # to _files and _flags, thereby letting us parse flat manifests
769 769 # as well as tree manifests.
770 770 self[f] = n
771 771 if fl:
772 772 self.setflag(f, fl)
773 773
774 774 def text(self, usemanifestv2=False):
775 775 """Get the full data of this manifest as a bytestring."""
776 776 flags = self.flags
777 777 return _text(((f, self[f], flags(f)) for f in self.keys()),
778 778 usemanifestv2)
779 779
780 780 def dirtext(self, usemanifestv2=False):
781 781 """Get the full data of this directory as a bytestring. Make sure that
782 782 any submanifests have been written first, so their nodeids are correct.
783 783 """
784 784 flags = self.flags
785 785 dirs = [(d[:-1], self._dirs[d]._node, 'd') for d in self._dirs]
786 786 files = [(f, self._files[f], flags(f)) for f in self._files]
787 787 return _text(sorted(dirs + files), usemanifestv2)
788 788
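In the dirtext, a subdirectory entry looks just like a file entry except that it carries the 'd' flag and the subdirectory's manifest node rather than a file node; parse() above recognizes the flag and recurses via readsubtree. Schematically, for a tree with file 'a.txt' and subdirectory 'foo/', the v1 dirtext is two sorted lines:

a.txt\0<40 hex digits of the file node>
foo\0<40 hex digits of the foo/ manifest node>d
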
789 789 def writesubtrees(self, m1, m2, writesubtree):
790 790 emptytree = treemanifest()
791 791 for d, subm in self._dirs.iteritems():
792 792 subp1 = m1._dirs.get(d, emptytree)._node
793 793 subp2 = m2._dirs.get(d, emptytree)._node
794 794 if subp1 == revlog.nullid:
795 795 subp1, subp2 = subp2, subp1
796 796 writesubtree(subm, subp1, subp2)
797 797
798 798 class manifest(revlog.revlog):
799 def __init__(self, opener, dir=''):
799 def __init__(self, opener, dir='', dirlogcache=None):
800 '''The 'dir' and 'dirlogcache' arguments are for internal use by
801 manifest.manifest only. External users should create a root manifest
802 log with manifest.manifest(opener) and call dirlog() on it.
803 '''
800 804 # During normal operations, we expect to deal with not more than four
801 805 # revs at a time (such as during commit --amend). When rebasing large
802 806 # stacks of commits, the number can go up, hence the config knob below.
803 807 cachesize = 4
804 808 usetreemanifest = False
805 809 usemanifestv2 = False
806 810 opts = getattr(opener, 'options', None)
807 811 if opts is not None:
808 812 cachesize = opts.get('manifestcachesize', cachesize)
809 813 usetreemanifest = opts.get('treemanifest', usetreemanifest)
810 814 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
811 815 self._mancache = util.lrucachedict(cachesize)
812 816 self._treeinmem = usetreemanifest
813 817 self._treeondisk = usetreemanifest
814 818 self._usemanifestv2 = usemanifestv2
815 819 indexfile = "00manifest.i"
816 820 if dir:
817 821 assert self._treeondisk
818 822 if not dir.endswith('/'):
819 823 dir = dir + '/'
820 824 indexfile = "meta/" + dir + "00manifest.i"
821 825 revlog.revlog.__init__(self, opener, indexfile)
822 826 self._dir = dir
827 # The dirlogcache is kept on the root manifest log
828 if dir:
829 self._dirlogcache = dirlogcache
830 else:
831 self._dirlogcache = {'': self}
823 832
824 833 def _newmanifest(self, data=''):
825 834 if self._treeinmem:
826 835 return treemanifest(self._dir, data)
827 836 return manifestdict(data)
828 837
838 def dirlog(self, dir):
839 assert self._treeondisk
840 if dir not in self._dirlogcache:
841 self._dirlogcache[dir] = manifest(self.opener, dir,
842 self._dirlogcache)
843 return self._dirlogcache[dir]
844
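This cache is the heart of the change: every per-directory revlog now hangs off the single _dirlogcache dict owned by the root manifest log, so repeated subtree reads reuse one manifest instance (and its _mancache) instead of reopening meta/<dir>/00manifest.i each time. A rough sketch of the sharing, assuming a tree-manifest-enabled opener (illustrative, not a real session):

rootlog = manifest(opener)        # root owns _dirlogcache = {'': rootlog}
a = rootlog.dirlog('foo/')        # opens meta/foo/00manifest.i once
b = rootlog.dirlog('foo/')
assert a is b                     # second lookup is a cache hit
deep = a.dirlog('foo/bar/')       # children share the root's cache too
assert deep is rootlog.dirlog('foo/bar/')
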
829 845 def _slowreaddelta(self, node):
830 846 r0 = self.deltaparent(self.rev(node))
831 847 m0 = self.read(self.node(r0))
832 848 m1 = self.read(node)
833 849 md = self._newmanifest()
834 850 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
835 851 if n1:
836 852 md[f] = n1
837 853 if fl1:
838 854 md.setflag(f, fl1)
839 855 return md
840 856
841 857 def readdelta(self, node):
842 858 if self._usemanifestv2 or self._treeondisk:
843 859 return self._slowreaddelta(node)
844 860 r = self.rev(node)
845 861 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
846 862 return self._newmanifest(d)
847 863
848 864 def readfast(self, node):
849 865 '''use the faster of readdelta or read
850 866
851 867 This will return a manifest which is either only the files
852 868 added/modified relative to p1, or all files in the
853 869 manifest. Which one is returned depends on the codepath used
854 870 to retrieve the data.
855 871 '''
856 872 r = self.rev(node)
857 873 deltaparent = self.deltaparent(r)
858 874 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
859 875 return self.readdelta(node)
860 876 return self.read(node)
861 877
862 878 def read(self, node):
863 879 if node == revlog.nullid:
864 880 return self._newmanifest() # don't upset local cache
865 881 if node in self._mancache:
866 882 return self._mancache[node][0]
867 883 text = self.revision(node)
868 884 if self._treeondisk:
869 885 def readsubtree(dir, subm):
870 sublog = manifest(self.opener, dir)
871 return sublog.read(subm)
886 return self.dirlog(dir).read(subm)
872 887 m = self._newmanifest()
873 888 m.parse(text, readsubtree)
874 889 m.setnode(node)
875 890 arraytext = None
876 891 else:
877 892 m = self._newmanifest(text)
878 893 arraytext = array.array('c', text)
879 894 self._mancache[node] = (m, arraytext)
880 895 return m
881 896
882 897 def find(self, node, f):
883 898 '''look up entry for a single file efficiently.
884 899 return (node, flags) pair if found, (None, None) if not.'''
885 900 m = self.read(node)
886 901 try:
887 902 return m.find(f)
888 903 except KeyError:
889 904 return None, None
890 905
891 906 def add(self, m, transaction, link, p1, p2, added, removed):
892 907 if (p1 in self._mancache and not self._treeinmem
893 908 and not self._usemanifestv2):
894 909 # If our first parent is in the manifest cache, we can
895 910 # compute a delta here using properties we know about the
896 911 # manifest up-front, which may save time later for the
897 912 # revlog layer.
898 913
899 914 _checkforbidden(added)
900 915 # combine the changed lists into one list for sorting
901 916 work = [(x, False) for x in added]
902 917 work.extend((x, True) for x in removed)
903 918 # this could use heapq.merge() (from Python 2.6+) or equivalent
904 919 # since the lists are already sorted
905 920 work.sort()
906 921
907 922 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
908 923 cachedelta = self.rev(p1), deltatext
909 924 text = util.buffer(arraytext)
910 925 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
911 926 else:
912 927 # The first parent manifest isn't already loaded, so we'll
913 928 # just encode a fulltext of the manifest and pass that
914 929 # through to the revlog layer, and let it handle the delta
915 930 # process.
916 931 if self._treeondisk:
917 932 m1 = self.read(p1)
918 933 m2 = self.read(p2)
919 934 n = self._addtree(m, transaction, link, m1, m2)
920 935 arraytext = None
921 936 else:
922 937 text = m.text(self._usemanifestv2)
923 938 n = self.addrevision(text, transaction, link, p1, p2)
924 939 arraytext = array.array('c', text)
925 940
926 941 self._mancache[n] = (m, arraytext)
927 942
928 943 return n
929 944
930 945 def _addtree(self, m, transaction, link, m1, m2):
931 946 def writesubtree(subm, subp1, subp2):
932 sublog = manifest(self.opener, subm.dir())
947 sublog = self.dirlog(subm.dir())
933 948 sublog.add(subm, transaction, link, subp1, subp2, None, None)
934 949 m.writesubtrees(m1, m2, writesubtree)
935 950 text = m.dirtext(self._usemanifestv2)
936 951 # If the manifest is unchanged compared to one parent,
937 952 # don't write a new revision
938 953 if text == m1.dirtext(self._usemanifestv2):
939 954 n = m1.node()
940 955 elif text == m2.dirtext(self._usemanifestv2):
941 956 n = m2.node()
942 957 else:
943 958 n = self.addrevision(text, transaction, link, m1.node(), m2.node())
944 959 # Save nodeid so parent manifest can calculate its nodeid
945 960 m.setnode(n)
946 961 return n