manifestv2: set requires at repo creation time...
Martin von Zweigbergk
r24571:919f8ce0 default
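
In short: with this change, enabling experimental.manifestv2 at "hg init" time appends a manifestv2 entry to the new repository's requirements, which _writerequirements then persists to .hg/requires; the format decision is made once at creation instead of being re-read from the config on every open. A minimal sketch of that creation-time logic, simplified from localrepo.__init__ and _writerequirements in the diff below (configbool and writeline are stand-ins for ui.configbool and the vfs write; the store/fncache/dotencode entries are shown unconditionally for brevity):

    def newrepo_requirements(configbool):
        # mirrors the creation branch of localrepo.__init__
        requirements = ['revlogv1', 'store', 'fncache', 'dotencode']
        if configbool('format', 'generaldelta', False):
            requirements.append('generaldelta')
        if configbool('experimental', 'manifestv2', False):
            requirements.append('manifestv2')  # the line this commit adds
        return set(requirements)

    def write_requires(writeline, requirements):
        # mirrors _writerequirements: one entry per line, sorted
        for r in sorted(requirements):
            writeline('%s\n' % r)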
@@ -0,0 +1,31 @@
1 Check that entry is added to .hg/requires
2
3 $ hg --config experimental.manifestv2=True init repo
4 $ cd repo
5 $ grep manifestv2 .hg/requires
6 manifestv2
7
8 Set up simple repo
9
10 $ echo a > file1
11 $ echo b > file2
12 $ echo c > file3
13 $ hg ci -Aqm 'initial'
14 $ echo d > file2
15 $ hg ci -m 'modify file2'
16
17 Check that 'hg verify', which uses manifest.readdelta(), works
18
19 $ hg verify
20 checking changesets
21 checking manifests
22 crosschecking files in changesets and manifests
23 checking files
24 3 files, 2 changesets, 4 total revisions
25
26 TODO: Check that manifest revlog is smaller than for v1
27
28 $ hg debugindex -m
29 rev offset length base linkrev nodeid p1 p2
30 0 0 106 0 0 f6279f9f8b31 000000000000 000000000000
31 1 106 59 0 1 cd20459b75e6 f6279f9f8b31 000000000000
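
The requires file is what makes the new format safe to ship behind a flag: a Mercurial whose supported set does not include manifestv2 will refuse to open the repository rather than misparse the new manifest encoding. Below is a simplified sketch of the gate that scmutil.readrequires applies when an existing repo is opened (the exception class and message are illustrative, not the exact implementation):

    class RequirementError(Exception):
        """illustrative stand-in for Mercurial's real abort"""

    def readrequires(lines, supported):
        # .hg/requires holds one requirement name per line
        requirements = set(l.rstrip('\n') for l in lines if l.strip())
        missing = requirements - set(supported)
        if missing:
            raise RequirementError(
                'repository requires features unknown to this Mercurial: %s'
                % ', '.join(sorted(missing)))
        return requirements

    # after the test above, .hg/requires contains manifestv2, so an old
    # client whose supported set lacks it fails here rather than later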
@@ -1,1926 +1,1925 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 format='HG10', **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG2Y' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.unbundle20(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 cg = exchange.readbundle(self.ui, cg, None)
129 129 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
130 130 if util.safehasattr(ret, 'getchunks'):
131 131 # This is a bundle20 object, turn it into an unbundler.
132 132 # This little dance should be dropped eventually when the API
133 133 # is finally improved.
134 134 stream = util.chunkbuffer(ret.getchunks())
135 135 ret = bundle2.unbundle20(self.ui, stream)
136 136 return ret
137 137 except error.PushRaced, exc:
138 138 raise error.ResponseError(_('push failed:'), str(exc))
139 139
140 140 def lock(self):
141 141 return self._repo.lock()
142 142
143 143 def addchangegroup(self, cg, source, url):
144 144 return changegroup.addchangegroup(self._repo, cg, source, url)
145 145
146 146 def pushkey(self, namespace, key, old, new):
147 147 return self._repo.pushkey(namespace, key, old, new)
148 148
149 149 def listkeys(self, namespace):
150 150 return self._repo.listkeys(namespace)
151 151
152 152 def debugwireargs(self, one, two, three=None, four=None, five=None):
153 153 '''used to test argument passing over the wire'''
154 154 return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
156 156 class locallegacypeer(localpeer):
157 157 '''peer extension which implements legacy methods too; used for tests with
158 158 restricted capabilities'''
159 159
160 160 def __init__(self, repo):
161 161 localpeer.__init__(self, repo, caps=legacycaps)
162 162
163 163 def branches(self, nodes):
164 164 return self._repo.branches(nodes)
165 165
166 166 def between(self, pairs):
167 167 return self._repo.between(pairs)
168 168
169 169 def changegroup(self, basenodes, source):
170 170 return changegroup.changegroup(self._repo, basenodes, source)
171 171
172 172 def changegroupsubset(self, bases, heads, source):
173 173 return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
177 supportedformats = set(('revlogv1', 'generaldelta'))
177 supportedformats = set(('revlogv1', 'generaldelta', 'manifestv2'))
178 178 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
179 179 'dotencode'))
180 openerreqs = set(('revlogv1', 'generaldelta'))
180 openerreqs = set(('revlogv1', 'generaldelta', 'manifestv2'))
181 181 requirements = ['revlogv1']
182 182 filtername = None
183 183
184 184 # a list of (ui, featureset) functions.
185 185 # only functions defined in module of enabled extensions are invoked
186 186 featuresetupfuncs = set()
187 187
188 188 def _baserequirements(self, create):
189 189 return self.requirements[:]
190 190
191 191 def __init__(self, baseui, path=None, create=False):
192 192 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
193 193 self.wopener = self.wvfs
194 194 self.root = self.wvfs.base
195 195 self.path = self.wvfs.join(".hg")
196 196 self.origroot = path
197 197 self.auditor = pathutil.pathauditor(self.root, self._checknested)
198 198 self.vfs = scmutil.vfs(self.path)
199 199 self.opener = self.vfs
200 200 self.baseui = baseui
201 201 self.ui = baseui.copy()
202 202 self.ui.copy = baseui.copy # prevent copying repo configuration
203 203 # A list of callbacks to shape the phase if no data were found.
204 204 # Callbacks are in the form: func(repo, roots) --> processed root.
205 205 # This list is to be filled by extensions during repo setup
206 206 self._phasedefaults = []
207 207 try:
208 208 self.ui.readconfig(self.join("hgrc"), self.root)
209 209 extensions.loadall(self.ui)
210 210 except IOError:
211 211 pass
212 212
213 213 if self.featuresetupfuncs:
214 214 self.supported = set(self._basesupported) # use private copy
215 215 extmods = set(m.__name__ for n, m
216 216 in extensions.extensions(self.ui))
217 217 for setupfunc in self.featuresetupfuncs:
218 218 if setupfunc.__module__ in extmods:
219 219 setupfunc(self.ui, self.supported)
220 220 else:
221 221 self.supported = self._basesupported
222 222
223 223 if not self.vfs.isdir():
224 224 if create:
225 225 if not self.wvfs.exists():
226 226 self.wvfs.makedirs()
227 227 self.vfs.makedir(notindexed=True)
228 228 requirements = self._baserequirements(create)
229 229 if self.ui.configbool('format', 'usestore', True):
230 230 self.vfs.mkdir("store")
231 231 requirements.append("store")
232 232 if self.ui.configbool('format', 'usefncache', True):
233 233 requirements.append("fncache")
234 234 if self.ui.configbool('format', 'dotencode', True):
235 235 requirements.append('dotencode')
236 236 # create an invalid changelog
237 237 self.vfs.append(
238 238 "00changelog.i",
239 239 '\0\0\0\2' # represents revlogv2
240 240 ' dummy changelog to prevent using the old repo layout'
241 241 )
242 242 if self.ui.configbool('format', 'generaldelta', False):
243 243 requirements.append("generaldelta")
244 if self.ui.configbool('experimental', 'manifestv2', False):
245 requirements.append("manifestv2")
244 246 requirements = set(requirements)
245 247 else:
246 248 raise error.RepoError(_("repository %s not found") % path)
247 249 elif create:
248 250 raise error.RepoError(_("repository %s already exists") % path)
249 251 else:
250 252 try:
251 253 requirements = scmutil.readrequires(self.vfs, self.supported)
252 254 except IOError, inst:
253 255 if inst.errno != errno.ENOENT:
254 256 raise
255 257 requirements = set()
256 258
257 259 self.sharedpath = self.path
258 260 try:
259 261 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
260 262 realpath=True)
261 263 s = vfs.base
262 264 if not vfs.exists():
263 265 raise error.RepoError(
264 266 _('.hg/sharedpath points to nonexistent directory %s') % s)
265 267 self.sharedpath = s
266 268 except IOError, inst:
267 269 if inst.errno != errno.ENOENT:
268 270 raise
269 271
270 272 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
271 273 self.spath = self.store.path
272 274 self.svfs = self.store.vfs
273 275 self.sopener = self.svfs
274 276 self.sjoin = self.store.join
275 277 self.vfs.createmode = self.store.createmode
276 278 self._applyrequirements(requirements)
277 279 if create:
278 280 self._writerequirements()
279 281
280 282
281 283 self._branchcaches = {}
282 284 self._revbranchcache = None
283 285 self.filterpats = {}
284 286 self._datafilters = {}
285 287 self._transref = self._lockref = self._wlockref = None
286 288
287 289 # A cache for various files under .hg/ that tracks file changes,
288 290 # (used by the filecache decorator)
289 291 #
290 292 # Maps a property name to its util.filecacheentry
291 293 self._filecache = {}
292 294
293 295 # hold sets of revision to be filtered
294 296 # should be cleared when something might have changed the filter value:
295 297 # - new changesets,
296 298 # - phase change,
297 299 # - new obsolescence marker,
298 300 # - working directory parent change,
299 301 # - bookmark changes
300 302 self.filteredrevcache = {}
301 303
302 304 # generic mapping between names and nodes
303 305 self.names = namespaces.namespaces()
304 306
305 307 def close(self):
306 308 self._writecaches()
307 309
308 310 def _writecaches(self):
309 311 if self._revbranchcache:
310 312 self._revbranchcache.write()
311 313
312 314 def _restrictcapabilities(self, caps):
313 315 # bundle2 is not ready for prime time, drop it unless explicitly
314 316 # required by the tests (or some brave tester)
315 317 if self.ui.configbool('experimental', 'bundle2-exp', False):
316 318 caps = set(caps)
317 319 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
318 320 caps.add('bundle2-exp=' + urllib.quote(capsblob))
319 321 return caps
320 322
321 323 def _applyrequirements(self, requirements):
322 324 self.requirements = requirements
323 325 self.svfs.options = dict((r, 1) for r in requirements
324 326 if r in self.openerreqs)
325 327 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
326 328 if chunkcachesize is not None:
327 329 self.svfs.options['chunkcachesize'] = chunkcachesize
328 330 maxchainlen = self.ui.configint('format', 'maxchainlen')
329 331 if maxchainlen is not None:
330 332 self.svfs.options['maxchainlen'] = maxchainlen
331 333 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
332 334 if manifestcachesize is not None:
333 335 self.svfs.options['manifestcachesize'] = manifestcachesize
334 336 usetreemanifest = self.ui.configbool('experimental', 'treemanifest')
335 337 if usetreemanifest is not None:
336 338 self.svfs.options['usetreemanifest'] = usetreemanifest
337 usemanifestv2 = self.ui.configbool('experimental', 'manifestv2')
338 if usemanifestv2 is not None:
339 self.svfs.options['usemanifestv2'] = usemanifestv2
340 339
341 340 def _writerequirements(self):
342 341 reqfile = self.vfs("requires", "w")
343 342 for r in sorted(self.requirements):
344 343 reqfile.write("%s\n" % r)
345 344 reqfile.close()
346 345
347 346 def _checknested(self, path):
348 347 """Determine if path is a legal nested repository."""
349 348 if not path.startswith(self.root):
350 349 return False
351 350 subpath = path[len(self.root) + 1:]
352 351 normsubpath = util.pconvert(subpath)
353 352
354 353 # XXX: Checking against the current working copy is wrong in
355 354 # the sense that it can reject things like
356 355 #
357 356 # $ hg cat -r 10 sub/x.txt
358 357 #
359 358 # if sub/ is no longer a subrepository in the working copy
360 359 # parent revision.
361 360 #
362 361 # However, it can of course also allow things that would have
363 362 # been rejected before, such as the above cat command if sub/
364 363 # is a subrepository now, but was a normal directory before.
365 364 # The old path auditor would have rejected by mistake since it
366 365 # panics when it sees sub/.hg/.
367 366 #
368 367 # All in all, checking against the working copy seems sensible
369 368 # since we want to prevent access to nested repositories on
370 369 # the filesystem *now*.
371 370 ctx = self[None]
372 371 parts = util.splitpath(subpath)
373 372 while parts:
374 373 prefix = '/'.join(parts)
375 374 if prefix in ctx.substate:
376 375 if prefix == normsubpath:
377 376 return True
378 377 else:
379 378 sub = ctx.sub(prefix)
380 379 return sub.checknested(subpath[len(prefix) + 1:])
381 380 else:
382 381 parts.pop()
383 382 return False
384 383
385 384 def peer(self):
386 385 return localpeer(self) # not cached to avoid reference cycle
387 386
388 387 def unfiltered(self):
389 388 """Return unfiltered version of the repository
390 389
391 390 Intended to be overwritten by filtered repo."""
392 391 return self
393 392
394 393 def filtered(self, name):
395 394 """Return a filtered version of a repository"""
396 395 # build a new class with the mixin and the current class
397 396 # (possibly subclass of the repo)
398 397 class proxycls(repoview.repoview, self.unfiltered().__class__):
399 398 pass
400 399 return proxycls(self, name)
401 400
402 401 @repofilecache('bookmarks')
403 402 def _bookmarks(self):
404 403 return bookmarks.bmstore(self)
405 404
406 405 @repofilecache('bookmarks.current')
407 406 def _bookmarkcurrent(self):
408 407 return bookmarks.readcurrent(self)
409 408
410 409 def bookmarkheads(self, bookmark):
411 410 name = bookmark.split('@', 1)[0]
412 411 heads = []
413 412 for mark, n in self._bookmarks.iteritems():
414 413 if mark.split('@', 1)[0] == name:
415 414 heads.append(n)
416 415 return heads
417 416
418 417 @storecache('phaseroots')
419 418 def _phasecache(self):
420 419 return phases.phasecache(self, self._phasedefaults)
421 420
422 421 @storecache('obsstore')
423 422 def obsstore(self):
424 423 # read default format for new obsstore.
425 424 defaultformat = self.ui.configint('format', 'obsstore-version', None)
426 425 # rely on obsstore class default when possible.
427 426 kwargs = {}
428 427 if defaultformat is not None:
429 428 kwargs['defaultformat'] = defaultformat
430 429 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
431 430 store = obsolete.obsstore(self.svfs, readonly=readonly,
432 431 **kwargs)
433 432 if store and readonly:
434 433 self.ui.warn(
435 434 _('obsolete feature not enabled but %i markers found!\n')
436 435 % len(list(store)))
437 436 return store
438 437
439 438 @storecache('00changelog.i')
440 439 def changelog(self):
441 440 c = changelog.changelog(self.svfs)
442 441 if 'HG_PENDING' in os.environ:
443 442 p = os.environ['HG_PENDING']
444 443 if p.startswith(self.root):
445 444 c.readpending('00changelog.i.a')
446 445 return c
447 446
448 447 @storecache('00manifest.i')
449 448 def manifest(self):
450 449 return manifest.manifest(self.svfs)
451 450
452 451 @repofilecache('dirstate')
453 452 def dirstate(self):
454 453 warned = [0]
455 454 def validate(node):
456 455 try:
457 456 self.changelog.rev(node)
458 457 return node
459 458 except error.LookupError:
460 459 if not warned[0]:
461 460 warned[0] = True
462 461 self.ui.warn(_("warning: ignoring unknown"
463 462 " working parent %s!\n") % short(node))
464 463 return nullid
465 464
466 465 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
467 466
468 467 def __getitem__(self, changeid):
469 468 if changeid is None:
470 469 return context.workingctx(self)
471 470 if isinstance(changeid, slice):
472 471 return [context.changectx(self, i)
473 472 for i in xrange(*changeid.indices(len(self)))
474 473 if i not in self.changelog.filteredrevs]
475 474 return context.changectx(self, changeid)
476 475
477 476 def __contains__(self, changeid):
478 477 try:
479 478 self[changeid]
480 479 return True
481 480 except error.RepoLookupError:
482 481 return False
483 482
484 483 def __nonzero__(self):
485 484 return True
486 485
487 486 def __len__(self):
488 487 return len(self.changelog)
489 488
490 489 def __iter__(self):
491 490 return iter(self.changelog)
492 491
493 492 def revs(self, expr, *args):
494 493 '''Return a list of revisions matching the given revset'''
495 494 expr = revset.formatspec(expr, *args)
496 495 m = revset.match(None, expr)
497 496 return m(self)
498 497
499 498 def set(self, expr, *args):
500 499 '''
501 500 Yield a context for each matching revision, after doing arg
502 501 replacement via revset.formatspec
503 502 '''
504 503 for r in self.revs(expr, *args):
505 504 yield self[r]
506 505
507 506 def url(self):
508 507 return 'file:' + self.root
509 508
510 509 def hook(self, name, throw=False, **args):
511 510 """Call a hook, passing this repo instance.
512 511
513 512 This is a convenience method to aid invoking hooks. Extensions likely
514 513 won't call this unless they have registered a custom hook or are
515 514 replacing code that is expected to call a hook.
516 515 """
517 516 return hook.hook(self.ui, self, name, throw, **args)
518 517
519 518 @unfilteredmethod
520 519 def _tag(self, names, node, message, local, user, date, extra={},
521 520 editor=False):
522 521 if isinstance(names, str):
523 522 names = (names,)
524 523
525 524 branches = self.branchmap()
526 525 for name in names:
527 526 self.hook('pretag', throw=True, node=hex(node), tag=name,
528 527 local=local)
529 528 if name in branches:
530 529 self.ui.warn(_("warning: tag %s conflicts with existing"
531 530 " branch name\n") % name)
532 531
533 532 def writetags(fp, names, munge, prevtags):
534 533 fp.seek(0, 2)
535 534 if prevtags and prevtags[-1] != '\n':
536 535 fp.write('\n')
537 536 for name in names:
538 537 if munge:
539 538 m = munge(name)
540 539 else:
541 540 m = name
542 541
543 542 if (self._tagscache.tagtypes and
544 543 name in self._tagscache.tagtypes):
545 544 old = self.tags().get(name, nullid)
546 545 fp.write('%s %s\n' % (hex(old), m))
547 546 fp.write('%s %s\n' % (hex(node), m))
548 547 fp.close()
549 548
550 549 prevtags = ''
551 550 if local:
552 551 try:
553 552 fp = self.vfs('localtags', 'r+')
554 553 except IOError:
555 554 fp = self.vfs('localtags', 'a')
556 555 else:
557 556 prevtags = fp.read()
558 557
559 558 # local tags are stored in the current charset
560 559 writetags(fp, names, None, prevtags)
561 560 for name in names:
562 561 self.hook('tag', node=hex(node), tag=name, local=local)
563 562 return
564 563
565 564 try:
566 565 fp = self.wfile('.hgtags', 'rb+')
567 566 except IOError, e:
568 567 if e.errno != errno.ENOENT:
569 568 raise
570 569 fp = self.wfile('.hgtags', 'ab')
571 570 else:
572 571 prevtags = fp.read()
573 572
574 573 # committed tags are stored in UTF-8
575 574 writetags(fp, names, encoding.fromlocal, prevtags)
576 575
577 576 fp.close()
578 577
579 578 self.invalidatecaches()
580 579
581 580 if '.hgtags' not in self.dirstate:
582 581 self[None].add(['.hgtags'])
583 582
584 583 m = matchmod.exact(self.root, '', ['.hgtags'])
585 584 tagnode = self.commit(message, user, date, extra=extra, match=m,
586 585 editor=editor)
587 586
588 587 for name in names:
589 588 self.hook('tag', node=hex(node), tag=name, local=local)
590 589
591 590 return tagnode
592 591
593 592 def tag(self, names, node, message, local, user, date, editor=False):
594 593 '''tag a revision with one or more symbolic names.
595 594
596 595 names is a list of strings or, when adding a single tag, names may be a
597 596 string.
598 597
599 598 if local is True, the tags are stored in a per-repository file.
600 599 otherwise, they are stored in the .hgtags file, and a new
601 600 changeset is committed with the change.
602 601
603 602 keyword arguments:
604 603
605 604 local: whether to store tags in non-version-controlled file
606 605 (default False)
607 606
608 607 message: commit message to use if committing
609 608
610 609 user: name of user to use if committing
611 610
612 611 date: date tuple to use if committing'''
613 612
614 613 if not local:
615 614 m = matchmod.exact(self.root, '', ['.hgtags'])
616 615 if util.any(self.status(match=m, unknown=True, ignored=True)):
617 616 raise util.Abort(_('working copy of .hgtags is changed'),
618 617 hint=_('please commit .hgtags manually'))
619 618
620 619 self.tags() # instantiate the cache
621 620 self._tag(names, node, message, local, user, date, editor=editor)
622 621
623 622 @filteredpropertycache
624 623 def _tagscache(self):
625 624 '''Returns a tagscache object that contains various tags related
626 625 caches.'''
627 626
628 627 # This simplifies its cache management by having one decorated
629 628 # function (this one) and the rest simply fetch things from it.
630 629 class tagscache(object):
631 630 def __init__(self):
632 631 # These two define the set of tags for this repository. tags
633 632 # maps tag name to node; tagtypes maps tag name to 'global' or
634 633 # 'local'. (Global tags are defined by .hgtags across all
635 634 # heads, and local tags are defined in .hg/localtags.)
636 635 # They constitute the in-memory cache of tags.
637 636 self.tags = self.tagtypes = None
638 637
639 638 self.nodetagscache = self.tagslist = None
640 639
641 640 cache = tagscache()
642 641 cache.tags, cache.tagtypes = self._findtags()
643 642
644 643 return cache
645 644
646 645 def tags(self):
647 646 '''return a mapping of tag to node'''
648 647 t = {}
649 648 if self.changelog.filteredrevs:
650 649 tags, tt = self._findtags()
651 650 else:
652 651 tags = self._tagscache.tags
653 652 for k, v in tags.iteritems():
654 653 try:
655 654 # ignore tags to unknown nodes
656 655 self.changelog.rev(v)
657 656 t[k] = v
658 657 except (error.LookupError, ValueError):
659 658 pass
660 659 return t
661 660
662 661 def _findtags(self):
663 662 '''Do the hard work of finding tags. Return a pair of dicts
664 663 (tags, tagtypes) where tags maps tag name to node, and tagtypes
665 664 maps tag name to a string like \'global\' or \'local\'.
666 665 Subclasses or extensions are free to add their own tags, but
667 666 should be aware that the returned dicts will be retained for the
668 667 duration of the localrepo object.'''
669 668
670 669 # XXX what tagtype should subclasses/extensions use? Currently
671 670 # mq and bookmarks add tags, but do not set the tagtype at all.
672 671 # Should each extension invent its own tag type? Should there
673 672 # be one tagtype for all such "virtual" tags? Or is the status
674 673 # quo fine?
675 674
676 675 alltags = {} # map tag name to (node, hist)
677 676 tagtypes = {}
678 677
679 678 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
680 679 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
681 680
682 681 # Build the return dicts. Have to re-encode tag names because
683 682 # the tags module always uses UTF-8 (in order not to lose info
684 683 # writing to the cache), but the rest of Mercurial wants them in
685 684 # local encoding.
686 685 tags = {}
687 686 for (name, (node, hist)) in alltags.iteritems():
688 687 if node != nullid:
689 688 tags[encoding.tolocal(name)] = node
690 689 tags['tip'] = self.changelog.tip()
691 690 tagtypes = dict([(encoding.tolocal(name), value)
692 691 for (name, value) in tagtypes.iteritems()])
693 692 return (tags, tagtypes)
694 693
695 694 def tagtype(self, tagname):
696 695 '''
697 696 return the type of the given tag. result can be:
698 697
699 698 'local' : a local tag
700 699 'global' : a global tag
701 700 None : tag does not exist
702 701 '''
703 702
704 703 return self._tagscache.tagtypes.get(tagname)
705 704
706 705 def tagslist(self):
707 706 '''return a list of tags ordered by revision'''
708 707 if not self._tagscache.tagslist:
709 708 l = []
710 709 for t, n in self.tags().iteritems():
711 710 l.append((self.changelog.rev(n), t, n))
712 711 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
713 712
714 713 return self._tagscache.tagslist
715 714
716 715 def nodetags(self, node):
717 716 '''return the tags associated with a node'''
718 717 if not self._tagscache.nodetagscache:
719 718 nodetagscache = {}
720 719 for t, n in self._tagscache.tags.iteritems():
721 720 nodetagscache.setdefault(n, []).append(t)
722 721 for tags in nodetagscache.itervalues():
723 722 tags.sort()
724 723 self._tagscache.nodetagscache = nodetagscache
725 724 return self._tagscache.nodetagscache.get(node, [])
726 725
727 726 def nodebookmarks(self, node):
728 727 marks = []
729 728 for bookmark, n in self._bookmarks.iteritems():
730 729 if n == node:
731 730 marks.append(bookmark)
732 731 return sorted(marks)
733 732
734 733 def branchmap(self):
735 734 '''returns a dictionary {branch: [branchheads]} with branchheads
736 735 ordered by increasing revision number'''
737 736 branchmap.updatecache(self)
738 737 return self._branchcaches[self.filtername]
739 738
740 739 @unfilteredmethod
741 740 def revbranchcache(self):
742 741 if not self._revbranchcache:
743 742 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
744 743 return self._revbranchcache
745 744
746 745 def branchtip(self, branch, ignoremissing=False):
747 746 '''return the tip node for a given branch
748 747
749 748 If ignoremissing is True, then this method will not raise an error.
750 749 This is helpful for callers that only expect None for a missing branch
751 750 (e.g. namespace).
752 751
753 752 '''
754 753 try:
755 754 return self.branchmap().branchtip(branch)
756 755 except KeyError:
757 756 if not ignoremissing:
758 757 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
759 758 else:
760 759 pass
761 760
762 761 def lookup(self, key):
763 762 return self[key].node()
764 763
765 764 def lookupbranch(self, key, remote=None):
766 765 repo = remote or self
767 766 if key in repo.branchmap():
768 767 return key
769 768
770 769 repo = (remote and remote.local()) and remote or self
771 770 return repo[key].branch()
772 771
773 772 def known(self, nodes):
774 773 nm = self.changelog.nodemap
775 774 pc = self._phasecache
776 775 result = []
777 776 for n in nodes:
778 777 r = nm.get(n)
779 778 resp = not (r is None or pc.phase(self, r) >= phases.secret)
780 779 result.append(resp)
781 780 return result
782 781
783 782 def local(self):
784 783 return self
785 784
786 785 def cancopy(self):
787 786 # so statichttprepo's override of local() works
788 787 if not self.local():
789 788 return False
790 789 if not self.ui.configbool('phases', 'publish', True):
791 790 return True
792 791 # if publishing we can't copy if there is filtered content
793 792 return not self.filtered('visible').changelog.filteredrevs
794 793
795 794 def shared(self):
796 795 '''the type of shared repository (None if not shared)'''
797 796 if self.sharedpath != self.path:
798 797 return 'store'
799 798 return None
800 799
801 800 def join(self, f, *insidef):
802 801 return self.vfs.join(os.path.join(f, *insidef))
803 802
804 803 def wjoin(self, f, *insidef):
805 804 return self.vfs.reljoin(self.root, f, *insidef)
806 805
807 806 def file(self, f):
808 807 if f[0] == '/':
809 808 f = f[1:]
810 809 return filelog.filelog(self.svfs, f)
811 810
812 811 def changectx(self, changeid):
813 812 return self[changeid]
814 813
815 814 def parents(self, changeid=None):
816 815 '''get list of changectxs for parents of changeid'''
817 816 return self[changeid].parents()
818 817
819 818 def setparents(self, p1, p2=nullid):
820 819 self.dirstate.beginparentchange()
821 820 copies = self.dirstate.setparents(p1, p2)
822 821 pctx = self[p1]
823 822 if copies:
824 823 # Adjust copy records, the dirstate cannot do it, it
825 824 # requires access to parents manifests. Preserve them
826 825 # only for entries added to first parent.
827 826 for f in copies:
828 827 if f not in pctx and copies[f] in pctx:
829 828 self.dirstate.copy(copies[f], f)
830 829 if p2 == nullid:
831 830 for f, s in sorted(self.dirstate.copies().items()):
832 831 if f not in pctx and s not in pctx:
833 832 self.dirstate.copy(None, f)
834 833 self.dirstate.endparentchange()
835 834
836 835 def filectx(self, path, changeid=None, fileid=None):
837 836 """changeid can be a changeset revision, node, or tag.
838 837 fileid can be a file revision or node."""
839 838 return context.filectx(self, path, changeid, fileid)
840 839
841 840 def getcwd(self):
842 841 return self.dirstate.getcwd()
843 842
844 843 def pathto(self, f, cwd=None):
845 844 return self.dirstate.pathto(f, cwd)
846 845
847 846 def wfile(self, f, mode='r'):
848 847 return self.wvfs(f, mode)
849 848
850 849 def _link(self, f):
851 850 return self.wvfs.islink(f)
852 851
853 852 def _loadfilter(self, filter):
854 853 if filter not in self.filterpats:
855 854 l = []
856 855 for pat, cmd in self.ui.configitems(filter):
857 856 if cmd == '!':
858 857 continue
859 858 mf = matchmod.match(self.root, '', [pat])
860 859 fn = None
861 860 params = cmd
862 861 for name, filterfn in self._datafilters.iteritems():
863 862 if cmd.startswith(name):
864 863 fn = filterfn
865 864 params = cmd[len(name):].lstrip()
866 865 break
867 866 if not fn:
868 867 fn = lambda s, c, **kwargs: util.filter(s, c)
869 868 # Wrap old filters not supporting keyword arguments
870 869 if not inspect.getargspec(fn)[2]:
871 870 oldfn = fn
872 871 fn = lambda s, c, **kwargs: oldfn(s, c)
873 872 l.append((mf, fn, params))
874 873 self.filterpats[filter] = l
875 874 return self.filterpats[filter]
876 875
877 876 def _filter(self, filterpats, filename, data):
878 877 for mf, fn, cmd in filterpats:
879 878 if mf(filename):
880 879 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
881 880 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
882 881 break
883 882
884 883 return data
885 884
886 885 @unfilteredpropertycache
887 886 def _encodefilterpats(self):
888 887 return self._loadfilter('encode')
889 888
890 889 @unfilteredpropertycache
891 890 def _decodefilterpats(self):
892 891 return self._loadfilter('decode')
893 892
894 893 def adddatafilter(self, name, filter):
895 894 self._datafilters[name] = filter
896 895
897 896 def wread(self, filename):
898 897 if self._link(filename):
899 898 data = self.wvfs.readlink(filename)
900 899 else:
901 900 data = self.wvfs.read(filename)
902 901 return self._filter(self._encodefilterpats, filename, data)
903 902
904 903 def wwrite(self, filename, data, flags):
905 904 data = self._filter(self._decodefilterpats, filename, data)
906 905 if 'l' in flags:
907 906 self.wvfs.symlink(data, filename)
908 907 else:
909 908 self.wvfs.write(filename, data)
910 909 if 'x' in flags:
911 910 self.wvfs.setflags(filename, False, True)
912 911
913 912 def wwritedata(self, filename, data):
914 913 return self._filter(self._decodefilterpats, filename, data)
915 914
916 915 def currenttransaction(self):
917 916 """return the current transaction or None if non exists"""
918 917 if self._transref:
919 918 tr = self._transref()
920 919 else:
921 920 tr = None
922 921
923 922 if tr and tr.running():
924 923 return tr
925 924 return None
926 925
927 926 def transaction(self, desc, report=None):
928 927 if (self.ui.configbool('devel', 'all')
929 928 or self.ui.configbool('devel', 'check-locks')):
930 929 l = self._lockref and self._lockref()
931 930 if l is None or not l.held:
932 931 msg = 'transaction with no lock\n'
933 932 if self.ui.tracebackflag:
934 933 util.debugstacktrace(msg, 1)
935 934 else:
936 935 self.ui.write_err(msg)
937 936 tr = self.currenttransaction()
938 937 if tr is not None:
939 938 return tr.nest()
940 939
941 940 # abort here if the journal already exists
942 941 if self.svfs.exists("journal"):
943 942 raise error.RepoError(
944 943 _("abandoned transaction found"),
945 944 hint=_("run 'hg recover' to clean up transaction"))
946 945
947 946 self.hook('pretxnopen', throw=True, txnname=desc)
948 947
949 948 self._writejournal(desc)
950 949 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
951 950 if report:
952 951 rp = report
953 952 else:
954 953 rp = self.ui.warn
955 954 vfsmap = {'plain': self.vfs} # root of .hg/
956 955 # we must avoid cyclic reference between repo and transaction.
957 956 reporef = weakref.ref(self)
958 957 def validate(tr):
959 958 """will run pre-closing hooks"""
960 959 pending = lambda: tr.writepending() and self.root or ""
961 960 reporef().hook('pretxnclose', throw=True, pending=pending,
962 961 txnname=desc)
963 962
964 963 tr = transaction.transaction(rp, self.sopener, vfsmap,
965 964 "journal",
966 965 "undo",
967 966 aftertrans(renames),
968 967 self.store.createmode,
969 968 validator=validate)
970 969 # note: writing the fncache only during finalize means that the file is
971 970 # outdated when running hooks. As fncache is used for streaming clone,
972 971 # this is not expected to break anything that happens during the hooks.
973 972 tr.addfinalize('flush-fncache', self.store.write)
974 973 def txnclosehook(tr2):
975 974 """To be run if transaction is successful, will schedule a hook run
976 975 """
977 976 def hook():
978 977 reporef().hook('txnclose', throw=False, txnname=desc,
979 978 **tr2.hookargs)
980 979 reporef()._afterlock(hook)
981 980 tr.addfinalize('txnclose-hook', txnclosehook)
982 981 self._transref = weakref.ref(tr)
983 982 return tr
984 983
985 984 def _journalfiles(self):
986 985 return ((self.svfs, 'journal'),
987 986 (self.vfs, 'journal.dirstate'),
988 987 (self.vfs, 'journal.branch'),
989 988 (self.vfs, 'journal.desc'),
990 989 (self.vfs, 'journal.bookmarks'),
991 990 (self.svfs, 'journal.phaseroots'))
992 991
993 992 def undofiles(self):
994 993 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
995 994
996 995 def _writejournal(self, desc):
997 996 self.vfs.write("journal.dirstate",
998 997 self.vfs.tryread("dirstate"))
999 998 self.vfs.write("journal.branch",
1000 999 encoding.fromlocal(self.dirstate.branch()))
1001 1000 self.vfs.write("journal.desc",
1002 1001 "%d\n%s\n" % (len(self), desc))
1003 1002 self.vfs.write("journal.bookmarks",
1004 1003 self.vfs.tryread("bookmarks"))
1005 1004 self.svfs.write("journal.phaseroots",
1006 1005 self.svfs.tryread("phaseroots"))
1007 1006
1008 1007 def recover(self):
1009 1008 lock = self.lock()
1010 1009 try:
1011 1010 if self.svfs.exists("journal"):
1012 1011 self.ui.status(_("rolling back interrupted transaction\n"))
1013 1012 vfsmap = {'': self.svfs,
1014 1013 'plain': self.vfs,}
1015 1014 transaction.rollback(self.svfs, vfsmap, "journal",
1016 1015 self.ui.warn)
1017 1016 self.invalidate()
1018 1017 return True
1019 1018 else:
1020 1019 self.ui.warn(_("no interrupted transaction available\n"))
1021 1020 return False
1022 1021 finally:
1023 1022 lock.release()
1024 1023
1025 1024 def rollback(self, dryrun=False, force=False):
1026 1025 wlock = lock = None
1027 1026 try:
1028 1027 wlock = self.wlock()
1029 1028 lock = self.lock()
1030 1029 if self.svfs.exists("undo"):
1031 1030 return self._rollback(dryrun, force)
1032 1031 else:
1033 1032 self.ui.warn(_("no rollback information available\n"))
1034 1033 return 1
1035 1034 finally:
1036 1035 release(lock, wlock)
1037 1036
1038 1037 @unfilteredmethod # Until we get smarter cache management
1039 1038 def _rollback(self, dryrun, force):
1040 1039 ui = self.ui
1041 1040 try:
1042 1041 args = self.vfs.read('undo.desc').splitlines()
1043 1042 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1044 1043 if len(args) >= 3:
1045 1044 detail = args[2]
1046 1045 oldtip = oldlen - 1
1047 1046
1048 1047 if detail and ui.verbose:
1049 1048 msg = (_('repository tip rolled back to revision %s'
1050 1049 ' (undo %s: %s)\n')
1051 1050 % (oldtip, desc, detail))
1052 1051 else:
1053 1052 msg = (_('repository tip rolled back to revision %s'
1054 1053 ' (undo %s)\n')
1055 1054 % (oldtip, desc))
1056 1055 except IOError:
1057 1056 msg = _('rolling back unknown transaction\n')
1058 1057 desc = None
1059 1058
1060 1059 if not force and self['.'] != self['tip'] and desc == 'commit':
1061 1060 raise util.Abort(
1062 1061 _('rollback of last commit while not checked out '
1063 1062 'may lose data'), hint=_('use -f to force'))
1064 1063
1065 1064 ui.status(msg)
1066 1065 if dryrun:
1067 1066 return 0
1068 1067
1069 1068 parents = self.dirstate.parents()
1070 1069 self.destroying()
1071 1070 vfsmap = {'plain': self.vfs, '': self.svfs}
1072 1071 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1073 1072 if self.vfs.exists('undo.bookmarks'):
1074 1073 self.vfs.rename('undo.bookmarks', 'bookmarks')
1075 1074 if self.svfs.exists('undo.phaseroots'):
1076 1075 self.svfs.rename('undo.phaseroots', 'phaseroots')
1077 1076 self.invalidate()
1078 1077
1079 1078 parentgone = (parents[0] not in self.changelog.nodemap or
1080 1079 parents[1] not in self.changelog.nodemap)
1081 1080 if parentgone:
1082 1081 self.vfs.rename('undo.dirstate', 'dirstate')
1083 1082 try:
1084 1083 branch = self.vfs.read('undo.branch')
1085 1084 self.dirstate.setbranch(encoding.tolocal(branch))
1086 1085 except IOError:
1087 1086 ui.warn(_('named branch could not be reset: '
1088 1087 'current branch is still \'%s\'\n')
1089 1088 % self.dirstate.branch())
1090 1089
1091 1090 self.dirstate.invalidate()
1092 1091 parents = tuple([p.rev() for p in self.parents()])
1093 1092 if len(parents) > 1:
1094 1093 ui.status(_('working directory now based on '
1095 1094 'revisions %d and %d\n') % parents)
1096 1095 else:
1097 1096 ui.status(_('working directory now based on '
1098 1097 'revision %d\n') % parents)
1099 1098 # TODO: if we know which new heads may result from this rollback, pass
1100 1099 # them to destroy(), which will prevent the branchhead cache from being
1101 1100 # invalidated.
1102 1101 self.destroyed()
1103 1102 return 0
1104 1103
1105 1104 def invalidatecaches(self):
1106 1105
1107 1106 if '_tagscache' in vars(self):
1108 1107 # can't use delattr on proxy
1109 1108 del self.__dict__['_tagscache']
1110 1109
1111 1110 self.unfiltered()._branchcaches.clear()
1112 1111 self.invalidatevolatilesets()
1113 1112
1114 1113 def invalidatevolatilesets(self):
1115 1114 self.filteredrevcache.clear()
1116 1115 obsolete.clearobscaches(self)
1117 1116
1118 1117 def invalidatedirstate(self):
1119 1118 '''Invalidates the dirstate, causing the next call to dirstate
1120 1119 to check if it was modified since the last time it was read,
1121 1120 rereading it if it has.
1122 1121
1123 1122 This is different from dirstate.invalidate() in that it doesn't always
1124 1123 reread the dirstate. Use dirstate.invalidate() if you want to
1125 1124 explicitly read the dirstate again (i.e. restoring it to a previous
1126 1125 known good state).'''
1127 1126 if hasunfilteredcache(self, 'dirstate'):
1128 1127 for k in self.dirstate._filecache:
1129 1128 try:
1130 1129 delattr(self.dirstate, k)
1131 1130 except AttributeError:
1132 1131 pass
1133 1132 delattr(self.unfiltered(), 'dirstate')
1134 1133
1135 1134 def invalidate(self):
1136 1135 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1137 1136 for k in self._filecache:
1138 1137 # dirstate is invalidated separately in invalidatedirstate()
1139 1138 if k == 'dirstate':
1140 1139 continue
1141 1140
1142 1141 try:
1143 1142 delattr(unfiltered, k)
1144 1143 except AttributeError:
1145 1144 pass
1146 1145 self.invalidatecaches()
1147 1146 self.store.invalidatecaches()
1148 1147
1149 1148 def invalidateall(self):
1150 1149 '''Fully invalidates both store and non-store parts, causing the
1151 1150 subsequent operation to reread any outside changes.'''
1152 1151 # extension should hook this to invalidate its caches
1153 1152 self.invalidate()
1154 1153 self.invalidatedirstate()
1155 1154
1156 1155 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1157 1156 try:
1158 1157 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1159 1158 except error.LockHeld, inst:
1160 1159 if not wait:
1161 1160 raise
1162 1161 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1163 1162 (desc, inst.locker))
1164 1163 # default to 600 seconds timeout
1165 1164 l = lockmod.lock(vfs, lockname,
1166 1165 int(self.ui.config("ui", "timeout", "600")),
1167 1166 releasefn, desc=desc)
1168 1167 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1169 1168 if acquirefn:
1170 1169 acquirefn()
1171 1170 return l
1172 1171
1173 1172 def _afterlock(self, callback):
1174 1173 """add a callback to the current repository lock.
1175 1174
1176 1175 The callback will be executed on lock release."""
1177 1176 l = self._lockref and self._lockref()
1178 1177 if l:
1179 1178 l.postrelease.append(callback)
1180 1179 else:
1181 1180 callback()
1182 1181
1183 1182 def lock(self, wait=True):
1184 1183 '''Lock the repository store (.hg/store) and return a weak reference
1185 1184 to the lock. Use this before modifying the store (e.g. committing or
1186 1185 stripping). If you are opening a transaction, get a lock as well.'''
1187 1186 l = self._lockref and self._lockref()
1188 1187 if l is not None and l.held:
1189 1188 l.lock()
1190 1189 return l
1191 1190
1192 1191 def unlock():
1193 1192 for k, ce in self._filecache.items():
1194 1193 if k == 'dirstate' or k not in self.__dict__:
1195 1194 continue
1196 1195 ce.refresh()
1197 1196
1198 1197 l = self._lock(self.svfs, "lock", wait, unlock,
1199 1198 self.invalidate, _('repository %s') % self.origroot)
1200 1199 self._lockref = weakref.ref(l)
1201 1200 return l
1202 1201
1203 1202 def wlock(self, wait=True):
1204 1203 '''Lock the non-store parts of the repository (everything under
1205 1204 .hg except .hg/store) and return a weak reference to the lock.
1206 1205 Use this before modifying files in .hg.'''
1207 1206 if (self.ui.configbool('devel', 'all')
1208 1207 or self.ui.configbool('devel', 'check-locks')):
1209 1208 l = self._lockref and self._lockref()
1210 1209 if l is not None and l.held:
1211 1210 msg = '"lock" taken before "wlock"\n'
1212 1211 if self.ui.tracebackflag:
1213 1212 util.debugstacktrace(msg, 1)
1214 1213 else:
1215 1214 self.ui.write_err(msg)
1216 1215 l = self._wlockref and self._wlockref()
1217 1216 if l is not None and l.held:
1218 1217 l.lock()
1219 1218 return l
1220 1219
1221 1220 def unlock():
1222 1221 if self.dirstate.pendingparentchange():
1223 1222 self.dirstate.invalidate()
1224 1223 else:
1225 1224 self.dirstate.write()
1226 1225
1227 1226 self._filecache['dirstate'].refresh()
1228 1227
1229 1228 l = self._lock(self.vfs, "wlock", wait, unlock,
1230 1229 self.invalidatedirstate, _('working directory of %s') %
1231 1230 self.origroot)
1232 1231 self._wlockref = weakref.ref(l)
1233 1232 return l
1234 1233
1235 1234 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1236 1235 """
1237 1236 commit an individual file as part of a larger transaction
1238 1237 """
1239 1238
1240 1239 fname = fctx.path()
1241 1240 fparent1 = manifest1.get(fname, nullid)
1242 1241 fparent2 = manifest2.get(fname, nullid)
1243 1242 if isinstance(fctx, context.filectx):
1244 1243 node = fctx.filenode()
1245 1244 if node in [fparent1, fparent2]:
1246 1245 self.ui.debug('reusing %s filelog entry\n' % fname)
1247 1246 return node
1248 1247
1249 1248 flog = self.file(fname)
1250 1249 meta = {}
1251 1250 copy = fctx.renamed()
1252 1251 if copy and copy[0] != fname:
1253 1252 # Mark the new revision of this file as a copy of another
1254 1253 # file. This copy data will effectively act as a parent
1255 1254 # of this new revision. If this is a merge, the first
1256 1255 # parent will be the nullid (meaning "look up the copy data")
1257 1256 # and the second one will be the other parent. For example:
1258 1257 #
1259 1258 # 0 --- 1 --- 3 rev1 changes file foo
1260 1259 # \ / rev2 renames foo to bar and changes it
1261 1260 # \- 2 -/ rev3 should have bar with all changes and
1262 1261 # should record that bar descends from
1263 1262 # bar in rev2 and foo in rev1
1264 1263 #
1265 1264 # this allows this merge to succeed:
1266 1265 #
1267 1266 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1268 1267 # \ / merging rev3 and rev4 should use bar@rev2
1269 1268 # \- 2 --- 4 as the merge base
1270 1269 #
1271 1270
1272 1271 cfname = copy[0]
1273 1272 crev = manifest1.get(cfname)
1274 1273 newfparent = fparent2
1275 1274
1276 1275 if manifest2: # branch merge
1277 1276 if fparent2 == nullid or crev is None: # copied on remote side
1278 1277 if cfname in manifest2:
1279 1278 crev = manifest2[cfname]
1280 1279 newfparent = fparent1
1281 1280
1282 1281 # Here, we used to search backwards through history to try to find
1283 1282 # where the file copy came from if the source of a copy was not in
1284 1283 # the parent directory. However, this doesn't actually make sense to
1285 1284 # do (what does a copy from something not in your working copy even
1286 1285 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1287 1286 # the user that copy information was dropped, so if they didn't
1288 1287 # expect this outcome it can be fixed, but this is the correct
1289 1288 # behavior in this circumstance.
1290 1289
1291 1290 if crev:
1292 1291 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1293 1292 meta["copy"] = cfname
1294 1293 meta["copyrev"] = hex(crev)
1295 1294 fparent1, fparent2 = nullid, newfparent
1296 1295 else:
1297 1296 self.ui.warn(_("warning: can't find ancestor for '%s' "
1298 1297 "copied from '%s'!\n") % (fname, cfname))
1299 1298
1300 1299 elif fparent1 == nullid:
1301 1300 fparent1, fparent2 = fparent2, nullid
1302 1301 elif fparent2 != nullid:
1303 1302 # is one parent an ancestor of the other?
1304 1303 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1305 1304 if fparent1 in fparentancestors:
1306 1305 fparent1, fparent2 = fparent2, nullid
1307 1306 elif fparent2 in fparentancestors:
1308 1307 fparent2 = nullid
1309 1308
1310 1309 # is the file changed?
1311 1310 text = fctx.data()
1312 1311 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1313 1312 changelist.append(fname)
1314 1313 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1315 1314 # are just the flags changed during merge?
1316 1315 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1317 1316 changelist.append(fname)
1318 1317
1319 1318 return fparent1
1320 1319
1321 1320 @unfilteredmethod
1322 1321 def commit(self, text="", user=None, date=None, match=None, force=False,
1323 1322 editor=False, extra={}):
1324 1323 """Add a new revision to current repository.
1325 1324
1326 1325 Revision information is gathered from the working directory,
1327 1326 match can be used to filter the committed files. If editor is
1328 1327 supplied, it is called to get a commit message.
1329 1328 """
1330 1329
1331 1330 def fail(f, msg):
1332 1331 raise util.Abort('%s: %s' % (f, msg))
1333 1332
1334 1333 if not match:
1335 1334 match = matchmod.always(self.root, '')
1336 1335
1337 1336 if not force:
1338 1337 vdirs = []
1339 1338 match.explicitdir = vdirs.append
1340 1339 match.bad = fail
1341 1340
1342 1341 wlock = self.wlock()
1343 1342 try:
1344 1343 wctx = self[None]
1345 1344 merge = len(wctx.parents()) > 1
1346 1345
1347 1346 if not force and merge and not match.always():
1348 1347 raise util.Abort(_('cannot partially commit a merge '
1349 1348 '(do not specify files or patterns)'))
1350 1349
1351 1350 status = self.status(match=match, clean=force)
1352 1351 if force:
1353 1352 status.modified.extend(status.clean) # mq may commit clean files
1354 1353
1355 1354 # check subrepos
1356 1355 subs = []
1357 1356 commitsubs = set()
1358 1357 newstate = wctx.substate.copy()
1359 1358 # only manage subrepos and .hgsubstate if .hgsub is present
1360 1359 if '.hgsub' in wctx:
1361 1360 # we'll decide whether to track this ourselves, thanks
1362 1361 for c in status.modified, status.added, status.removed:
1363 1362 if '.hgsubstate' in c:
1364 1363 c.remove('.hgsubstate')
1365 1364
1366 1365 # compare current state to last committed state
1367 1366 # build new substate based on last committed state
1368 1367 oldstate = wctx.p1().substate
1369 1368 for s in sorted(newstate.keys()):
1370 1369 if not match(s):
1371 1370 # ignore working copy, use old state if present
1372 1371 if s in oldstate:
1373 1372 newstate[s] = oldstate[s]
1374 1373 continue
1375 1374 if not force:
1376 1375 raise util.Abort(
1377 1376 _("commit with new subrepo %s excluded") % s)
1378 1377 dirtyreason = wctx.sub(s).dirtyreason(True)
1379 1378 if dirtyreason:
1380 1379 if not self.ui.configbool('ui', 'commitsubrepos'):
1381 1380 raise util.Abort(dirtyreason,
1382 1381 hint=_("use --subrepos for recursive commit"))
1383 1382 subs.append(s)
1384 1383 commitsubs.add(s)
1385 1384 else:
1386 1385 bs = wctx.sub(s).basestate()
1387 1386 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1388 1387 if oldstate.get(s, (None, None, None))[1] != bs:
1389 1388 subs.append(s)
1390 1389
1391 1390 # check for removed subrepos
1392 1391 for p in wctx.parents():
1393 1392 r = [s for s in p.substate if s not in newstate]
1394 1393 subs += [s for s in r if match(s)]
1395 1394 if subs:
1396 1395 if (not match('.hgsub') and
1397 1396 '.hgsub' in (wctx.modified() + wctx.added())):
1398 1397 raise util.Abort(
1399 1398 _("can't commit subrepos without .hgsub"))
1400 1399 status.modified.insert(0, '.hgsubstate')
1401 1400
1402 1401 elif '.hgsub' in status.removed:
1403 1402 # clean up .hgsubstate when .hgsub is removed
1404 1403 if ('.hgsubstate' in wctx and
1405 1404 '.hgsubstate' not in (status.modified + status.added +
1406 1405 status.removed)):
1407 1406 status.removed.insert(0, '.hgsubstate')
1408 1407
1409 1408 # make sure all explicit patterns are matched
1410 1409 if not force and match.files():
1411 1410 matched = set(status.modified + status.added + status.removed)
1412 1411
1413 1412 for f in match.files():
1414 1413 f = self.dirstate.normalize(f)
1415 1414 if f == '.' or f in matched or f in wctx.substate:
1416 1415 continue
1417 1416 if f in status.deleted:
1418 1417 fail(f, _('file not found!'))
1419 1418 if f in vdirs: # visited directory
1420 1419 d = f + '/'
1421 1420 for mf in matched:
1422 1421 if mf.startswith(d):
1423 1422 break
1424 1423 else:
1425 1424 fail(f, _("no match under directory!"))
1426 1425 elif f not in self.dirstate:
1427 1426 fail(f, _("file not tracked!"))
1428 1427
1429 1428 cctx = context.workingcommitctx(self, status,
1430 1429 text, user, date, extra)
1431 1430
1432 1431 if (not force and not extra.get("close") and not merge
1433 1432 and not cctx.files()
1434 1433 and wctx.branch() == wctx.p1().branch()):
1435 1434 return None
1436 1435
1437 1436 if merge and cctx.deleted():
1438 1437 raise util.Abort(_("cannot commit merge with missing files"))
1439 1438
1440 1439 ms = mergemod.mergestate(self)
1441 1440 for f in status.modified:
1442 1441 if f in ms and ms[f] == 'u':
1443 1442 raise util.Abort(_('unresolved merge conflicts '
1444 1443 '(see "hg help resolve")'))
1445 1444
1446 1445 if editor:
1447 1446 cctx._text = editor(self, cctx, subs)
1448 1447 edited = (text != cctx._text)
1449 1448
1450 1449 # Save commit message in case this transaction gets rolled back
1451 1450 # (e.g. by a pretxncommit hook). Leave the content alone on
1452 1451 # the assumption that the user will use the same editor again.
1453 1452 msgfn = self.savecommitmessage(cctx._text)
1454 1453
1455 1454 # commit subs and write new state
1456 1455 if subs:
1457 1456 for s in sorted(commitsubs):
1458 1457 sub = wctx.sub(s)
1459 1458 self.ui.status(_('committing subrepository %s\n') %
1460 1459 subrepo.subrelpath(sub))
1461 1460 sr = sub.commit(cctx._text, user, date)
1462 1461 newstate[s] = (newstate[s][0], sr)
1463 1462 subrepo.writestate(self, newstate)
1464 1463
1465 1464 p1, p2 = self.dirstate.parents()
1466 1465 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1467 1466 try:
1468 1467 self.hook("precommit", throw=True, parent1=hookp1,
1469 1468 parent2=hookp2)
1470 1469 ret = self.commitctx(cctx, True)
1471 1470 except: # re-raises
1472 1471 if edited:
1473 1472 self.ui.write(
1474 1473 _('note: commit message saved in %s\n') % msgfn)
1475 1474 raise
1476 1475
1477 1476 # update bookmarks, dirstate and mergestate
1478 1477 bookmarks.update(self, [p1, p2], ret)
1479 1478 cctx.markcommitted(ret)
1480 1479 ms.reset()
1481 1480 finally:
1482 1481 wlock.release()
1483 1482
1484 1483 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1485 1484 # hack for command that use a temporary commit (eg: histedit)
1486 1485 # temporary commit got stripped before hook release
1487 1486 if node in self:
1488 1487 self.hook("commit", node=node, parent1=parent1,
1489 1488 parent2=parent2)
1490 1489 self._afterlock(commithook)
1491 1490 return ret
1492 1491
1493 1492 @unfilteredmethod
1494 1493 def commitctx(self, ctx, error=False):
1495 1494 """Add a new revision to current repository.
1496 1495 Revision information is passed via the context argument.
1497 1496 """
1498 1497
1499 1498 tr = None
1500 1499 p1, p2 = ctx.p1(), ctx.p2()
1501 1500 user = ctx.user()
1502 1501
1503 1502 lock = self.lock()
1504 1503 try:
1505 1504 tr = self.transaction("commit")
1506 1505 trp = weakref.proxy(tr)
1507 1506
1508 1507 if ctx.files():
1509 1508 m1 = p1.manifest()
1510 1509 m2 = p2.manifest()
1511 1510 m = m1.copy()
1512 1511
1513 1512 # check in files
1514 1513 added = []
1515 1514 changed = []
1516 1515 removed = list(ctx.removed())
1517 1516 linkrev = len(self)
1518 1517 self.ui.note(_("committing files:\n"))
1519 1518 for f in sorted(ctx.modified() + ctx.added()):
1520 1519 self.ui.note(f + "\n")
1521 1520 try:
1522 1521 fctx = ctx[f]
1523 1522 if fctx is None:
1524 1523 removed.append(f)
1525 1524 else:
1526 1525 added.append(f)
1527 1526 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1528 1527 trp, changed)
1529 1528 m.setflag(f, fctx.flags())
1530 1529 except OSError, inst:
1531 1530 self.ui.warn(_("trouble committing %s!\n") % f)
1532 1531 raise
1533 1532 except IOError, inst:
1534 1533 errcode = getattr(inst, 'errno', errno.ENOENT)
1535 1534 if error or errcode and errcode != errno.ENOENT:
1536 1535 self.ui.warn(_("trouble committing %s!\n") % f)
1537 1536 raise
1538 1537
1539 1538 # update manifest
1540 1539 self.ui.note(_("committing manifest\n"))
1541 1540 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1542 1541 drop = [f for f in removed if f in m]
1543 1542 for f in drop:
1544 1543 del m[f]
1545 1544 mn = self.manifest.add(m, trp, linkrev,
1546 1545 p1.manifestnode(), p2.manifestnode(),
1547 1546 added, drop)
1548 1547 files = changed + removed
1549 1548 else:
1550 1549 mn = p1.manifestnode()
1551 1550 files = []
1552 1551
1553 1552 # update changelog
1554 1553 self.ui.note(_("committing changelog\n"))
1555 1554 self.changelog.delayupdate(tr)
1556 1555 n = self.changelog.add(mn, files, ctx.description(),
1557 1556 trp, p1.node(), p2.node(),
1558 1557 user, ctx.date(), ctx.extra().copy())
1559 1558 p = lambda: tr.writepending() and self.root or ""
1560 1559 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1561 1560 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1562 1561 parent2=xp2, pending=p)
1563 1562 # set the new commit in the proper phase
1564 1563 targetphase = subrepo.newcommitphase(self.ui, ctx)
1565 1564 if targetphase:
1566 1565 # retractboundary does not alter parent changesets:
1567 1566 # if a parent has a higher phase, the resulting phase will
1568 1567 # be compliant anyway
1569 1568 #
1570 1569 # if the minimal phase is 0 we don't need to retract anything
1571 1570 phases.retractboundary(self, tr, targetphase, [n])
1572 1571 tr.close()
1573 1572 branchmap.updatecache(self.filtered('served'))
1574 1573 return n
1575 1574 finally:
1576 1575 if tr:
1577 1576 tr.release()
1578 1577 lock.release()
1579 1578
1580 1579 @unfilteredmethod
1581 1580 def destroying(self):
1582 1581 '''Inform the repository that nodes are about to be destroyed.
1583 1582 Intended for use by strip and rollback, so there's a common
1584 1583 place for anything that has to be done before destroying history.
1585 1584
1586 1585 This is mostly useful for saving state that is in memory and waiting
1587 1586 to be flushed when the current lock is released. Because a call to
1588 1587 destroyed is imminent, the repo will be invalidated, causing those
1589 1588 changes either to stay in memory (waiting for the next unlock) or to
1590 1589 vanish completely.
1591 1590 '''
1592 1591 # When using the same lock to commit and strip, the phasecache is left
1593 1592 # dirty after committing. Then when we strip, the repo is invalidated,
1594 1593 # causing those changes to disappear.
1595 1594 if '_phasecache' in vars(self):
1596 1595 self._phasecache.write()
1597 1596
1598 1597 @unfilteredmethod
1599 1598 def destroyed(self):
1600 1599 '''Inform the repository that nodes have been destroyed.
1601 1600 Intended for use by strip and rollback, so there's a common
1602 1601 place for anything that has to be done after destroying history.
1603 1602 '''
1604 1603 # When one tries to:
1605 1604 # 1) destroy nodes thus calling this method (e.g. strip)
1606 1605 # 2) use phasecache somewhere (e.g. commit)
1607 1606 #
1608 1607 # then 2) will fail because the phasecache contains nodes that were
1609 1608 # removed. We can either remove phasecache from the filecache,
1610 1609 # causing it to reload next time it is accessed, or simply filter
1611 1610 # the removed nodes now and write the updated cache.
1612 1611 self._phasecache.filterunknown(self)
1613 1612 self._phasecache.write()
1614 1613
1615 1614 # update the 'served' branch cache to help read-only server processes
1616 1615 # Thanks to branchcache collaboration this is done from the nearest
1617 1616 # filtered subset and it is expected to be fast.
1618 1617 branchmap.updatecache(self.filtered('served'))
1619 1618
1620 1619 # Ensure the persistent tag cache is updated. Doing it now
1621 1620 # means that the tag cache only has to worry about destroyed
1622 1621 # heads immediately after a strip/rollback. That in turn
1623 1622 # guarantees that "cachetip == currenttip" (comparing both rev
1624 1623 # and node) always means no nodes have been added or destroyed.
1625 1624
1626 1625 # XXX this is suboptimal when qrefresh'ing: we strip the current
1627 1626 # head, refresh the tag cache, then immediately add a new head.
1628 1627 # But I think doing it this way is necessary for the "instant
1629 1628 # tag cache retrieval" case to work.
1630 1629 self.invalidate()
1631 1630
1632 1631 def walk(self, match, node=None):
1633 1632 '''
1634 1633 walk recursively through the directory tree or a given
1635 1634 changeset, finding all files matched by the match
1636 1635 function
1637 1636 '''
1638 1637 return self[node].walk(match)
1639 1638
1640 1639 def status(self, node1='.', node2=None, match=None,
1641 1640 ignored=False, clean=False, unknown=False,
1642 1641 listsubrepos=False):
1643 1642 '''a convenience method that calls node1.status(node2)'''
1644 1643 return self[node1].status(node2, match, ignored, clean, unknown,
1645 1644 listsubrepos)
1646 1645
1647 1646 def heads(self, start=None):
1648 1647 heads = self.changelog.heads(start)
1649 1648 # sort the output in rev descending order
1650 1649 return sorted(heads, key=self.changelog.rev, reverse=True)
1651 1650
1652 1651 def branchheads(self, branch=None, start=None, closed=False):
1653 1652 '''return a (possibly filtered) list of heads for the given branch
1654 1653
1655 1654 Heads are returned in topological order, from newest to oldest.
1656 1655 If branch is None, use the dirstate branch.
1657 1656 If start is not None, return only heads reachable from start.
1658 1657 If closed is True, return heads that are marked as closed as well.
1659 1658 '''
1660 1659 if branch is None:
1661 1660 branch = self[None].branch()
1662 1661 branches = self.branchmap()
1663 1662 if branch not in branches:
1664 1663 return []
1665 1664 # the cache returns heads ordered lowest to highest
1666 1665 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1667 1666 if start is not None:
1668 1667 # filter out the heads that cannot be reached from startrev
1669 1668 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1670 1669 bheads = [h for h in bheads if h in fbheads]
1671 1670 return bheads
1672 1671
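A short usage sketch of branchheads(), following its docstring (branch name illustrative):

    heads = repo.branchheads('default')       # newest-to-oldest heads of 'default'
    allheads = repo.branchheads(closed=True)  # dirstate branch, including closed heads
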
1673 1672 def branches(self, nodes):
1674 1673 if not nodes:
1675 1674 nodes = [self.changelog.tip()]
1676 1675 b = []
1677 1676 for n in nodes:
1678 1677 t = n
1679 1678 while True:
1680 1679 p = self.changelog.parents(n)
1681 1680 if p[1] != nullid or p[0] == nullid:
1682 1681 b.append((t, n, p[0], p[1]))
1683 1682 break
1684 1683 n = p[0]
1685 1684 return b
1686 1685
1687 1686 def between(self, pairs):
1688 1687 r = []
1689 1688
1690 1689 for top, bottom in pairs:
1691 1690 n, l, i = top, [], 0
1692 1691 f = 1
1693 1692
1694 1693 while n != bottom and n != nullid:
1695 1694 p = self.changelog.parents(n)[0]
1696 1695 if i == f:
1697 1696 l.append(n)
1698 1697 f = f * 2
1699 1698 n = p
1700 1699 i += 1
1701 1700
1702 1701 r.append(l)
1703 1702
1704 1703 return r
1705 1704
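between() samples each top-to-bottom first-parent chain at exponentially growing distances, which keeps the result small even for long chains. A standalone sketch of the selection pattern (the real method collects changelog nodes rather than integers):

    def sample_distances(chain_length):
        picked, i, f = [], 0, 1
        while i < chain_length:
            if i == f:  # record distances 1, 2, 4, 8, ... from the top
                picked.append(i)
                f *= 2
            i += 1
        return picked

    sample_distances(20)  # -> [1, 2, 4, 8, 16]
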
1706 1705 def checkpush(self, pushop):
1707 1706 """Extensions can override this function if additional checks have
1708 1707 to be performed before pushing, or call it if they override the
1709 1708 push command.
1710 1709 """
1711 1710 pass
1712 1711
1713 1712 @unfilteredpropertycache
1714 1713 def prepushoutgoinghooks(self):
1715 1714 """Return util.hooks consists of "(repo, remote, outgoing)"
1716 1715 functions, which are called before pushing changesets.
1717 1716 """
1718 1717 return util.hooks()
1719 1718
1720 1719 def stream_in(self, remote, requirements):
1721 1720 lock = self.lock()
1722 1721 try:
1723 1722 # Save remote branchmap. We will use it later
1724 1723 # to speed up branchcache creation
1725 1724 rbranchmap = None
1726 1725 if remote.capable("branchmap"):
1727 1726 rbranchmap = remote.branchmap()
1728 1727
1729 1728 fp = remote.stream_out()
1730 1729 l = fp.readline()
1731 1730 try:
1732 1731 resp = int(l)
1733 1732 except ValueError:
1734 1733 raise error.ResponseError(
1735 1734 _('unexpected response from remote server:'), l)
1736 1735 if resp == 1:
1737 1736 raise util.Abort(_('operation forbidden by server'))
1738 1737 elif resp == 2:
1739 1738 raise util.Abort(_('locking the remote repository failed'))
1740 1739 elif resp != 0:
1741 1740 raise util.Abort(_('the server sent an unknown error code'))
1742 1741 self.ui.status(_('streaming all changes\n'))
1743 1742 l = fp.readline()
1744 1743 try:
1745 1744 total_files, total_bytes = map(int, l.split(' ', 1))
1746 1745 except (ValueError, TypeError):
1747 1746 raise error.ResponseError(
1748 1747 _('unexpected response from remote server:'), l)
1749 1748 self.ui.status(_('%d files to transfer, %s of data\n') %
1750 1749 (total_files, util.bytecount(total_bytes)))
1751 1750 handled_bytes = 0
1752 1751 self.ui.progress(_('clone'), 0, total=total_bytes)
1753 1752 start = time.time()
1754 1753
1755 1754 tr = self.transaction(_('clone'))
1756 1755 try:
1757 1756 for i in xrange(total_files):
1758 1757 # XXX doesn't support '\n' or '\r' in filenames
1759 1758 l = fp.readline()
1760 1759 try:
1761 1760 name, size = l.split('\0', 1)
1762 1761 size = int(size)
1763 1762 except (ValueError, TypeError):
1764 1763 raise error.ResponseError(
1765 1764 _('unexpected response from remote server:'), l)
1766 1765 if self.ui.debugflag:
1767 1766 self.ui.debug('adding %s (%s)\n' %
1768 1767 (name, util.bytecount(size)))
1769 1768 # for backwards compat, name was partially encoded
1770 1769 ofp = self.svfs(store.decodedir(name), 'w')
1771 1770 for chunk in util.filechunkiter(fp, limit=size):
1772 1771 handled_bytes += len(chunk)
1773 1772 self.ui.progress(_('clone'), handled_bytes,
1774 1773 total=total_bytes)
1775 1774 ofp.write(chunk)
1776 1775 ofp.close()
1777 1776 tr.close()
1778 1777 finally:
1779 1778 tr.release()
1780 1779
1781 1780 # Writing straight to files circumvented the in-memory caches
1782 1781 self.invalidate()
1783 1782
1784 1783 elapsed = time.time() - start
1785 1784 if elapsed <= 0:
1786 1785 elapsed = 0.001
1787 1786 self.ui.progress(_('clone'), None)
1788 1787 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1789 1788 (util.bytecount(total_bytes), elapsed,
1790 1789 util.bytecount(total_bytes / elapsed)))
1791 1790
1792 1791 # new requirements = old non-format requirements +
1793 1792 # new format-related requirements
1794 1793 # from the streamed-in repository
1795 1794 requirements.update(set(self.requirements) - self.supportedformats)
1796 1795 self._applyrequirements(requirements)
1797 1796 self._writerequirements()
1798 1797
1799 1798 if rbranchmap:
1800 1799 rbheads = []
1801 1800 closed = []
1802 1801 for bheads in rbranchmap.itervalues():
1803 1802 rbheads.extend(bheads)
1804 1803 for h in bheads:
1805 1804 r = self.changelog.rev(h)
1806 1805 b, c = self.changelog.branchinfo(r)
1807 1806 if c:
1808 1807 closed.append(h)
1809 1808
1810 1809 if rbheads:
1811 1810 rtiprev = max((int(self.changelog.rev(node))
1812 1811 for node in rbheads))
1813 1812 cache = branchmap.branchcache(rbranchmap,
1814 1813 self[rtiprev].node(),
1815 1814 rtiprev,
1816 1815 closednodes=closed)
1817 1816 # Try to stick it as low as possible
1818 1817 # (filters above 'served' are unlikely to be fetched from a clone)
1819 1818 for candidate in ('base', 'immutable', 'served'):
1820 1819 rview = self.filtered(candidate)
1821 1820 if cache.validfor(rview):
1822 1821 self._branchcaches[candidate] = cache
1823 1822 cache.write(rview)
1824 1823 break
1825 1824 self.invalidate()
1826 1825 return len(self.heads()) + 1
1827 1826 finally:
1828 1827 lock.release()
1829 1828
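stream_in() consumes a simple wire format: a status-code line ('0' means OK), a '<total files> <total bytes>' line, then, per file, a header line of the file name, a NUL byte, and the size, followed by exactly that many raw bytes. A hedged encoder sketch for that layout, useful for reasoning about the parser above (entry values illustrative):

    def encode_stream(entries):  # entries: [(name, data), ...]
        total = sum(len(data) for _, data in entries)
        out = ['0\n', '%d %d\n' % (len(entries), total)]
        for name, data in entries:
            out.append('%s\0%d\n' % (name, len(data)))
            out.append(data)
        return ''.join(out)
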
1830 1829 def clone(self, remote, heads=[], stream=None):
1831 1830 '''clone remote repository.
1832 1831
1833 1832 keyword arguments:
1834 1833 heads: list of revs to clone (forces use of pull)
1835 1834 stream: use streaming clone if possible'''
1836 1835
1837 1836 # now, all clients that can request uncompressed clones can
1838 1837 # read repo formats supported by all servers that can serve
1839 1838 # them.
1840 1839
1841 1840 # if revlog format changes, client will have to check version
1842 1841 # and format flags on "stream" capability, and use
1843 1842 # uncompressed only if compatible.
1844 1843
1845 1844 if stream is None:
1846 1845 # if the server explicitly prefers to stream (for fast LANs)
1847 1846 stream = remote.capable('stream-preferred')
1848 1847
1849 1848 if stream and not heads:
1850 1849 # 'stream' means remote revlog format is revlogv1 only
1851 1850 if remote.capable('stream'):
1852 1851 self.stream_in(remote, set(('revlogv1',)))
1853 1852 else:
1854 1853 # otherwise, 'streamreqs' contains the remote revlog format
1855 1854 streamreqs = remote.capable('streamreqs')
1856 1855 if streamreqs:
1857 1856 streamreqs = set(streamreqs.split(','))
1858 1857 # if we support it, stream in and adjust our requirements
1859 1858 if not streamreqs - self.supportedformats:
1860 1859 self.stream_in(remote, streamreqs)
1861 1860
1862 1861 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1863 1862 try:
1864 1863 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1865 1864 ret = exchange.pull(self, remote, heads).cgresult
1866 1865 finally:
1867 1866 self.ui.restoreconfig(quiet)
1868 1867 return ret
1869 1868
1870 1869 def pushkey(self, namespace, key, old, new):
1871 1870 try:
1872 1871 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1873 1872 old=old, new=new)
1874 1873 except error.HookAbort, exc:
1875 1874 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1876 1875 if exc.hint:
1877 1876 self.ui.write_err(_("(%s)\n") % exc.hint)
1878 1877 return False
1879 1878 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1880 1879 ret = pushkey.push(self, namespace, key, old, new)
1881 1880 def runhook():
1882 1881 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1883 1882 ret=ret)
1884 1883 self._afterlock(runhook)
1885 1884 return ret
1886 1885
1887 1886 def listkeys(self, namespace):
1888 1887 self.hook('prelistkeys', throw=True, namespace=namespace)
1889 1888 self.ui.debug('listing keys for "%s"\n' % namespace)
1890 1889 values = pushkey.list(self, namespace)
1891 1890 self.hook('listkeys', namespace=namespace, values=values)
1892 1891 return values
1893 1892
1894 1893 def debugwireargs(self, one, two, three=None, four=None, five=None):
1895 1894 '''used to test argument passing over the wire'''
1896 1895 return "%s %s %s %s %s" % (one, two, three, four, five)
1897 1896
1898 1897 def savecommitmessage(self, text):
1899 1898 fp = self.vfs('last-message.txt', 'wb')
1900 1899 try:
1901 1900 fp.write(text)
1902 1901 finally:
1903 1902 fp.close()
1904 1903 return self.pathto(fp.name[len(self.root) + 1:])
1905 1904
1906 1905 # used to avoid circular references so destructors work
1907 1906 def aftertrans(files):
1908 1907 renamefiles = [tuple(t) for t in files]
1909 1908 def a():
1910 1909 for vfs, src, dest in renamefiles:
1911 1910 try:
1912 1911 vfs.rename(src, dest)
1913 1912 except OSError: # journal file does not yet exist
1914 1913 pass
1915 1914 return a
1916 1915
1917 1916 def undoname(fn):
1918 1917 base, name = os.path.split(fn)
1919 1918 assert name.startswith('journal')
1920 1919 return os.path.join(base, name.replace('journal', 'undo', 1))
1921 1920
1922 1921 def instance(ui, path, create):
1923 1922 return localrepository(ui, util.urllocalpath(path), create)
1924 1923
1925 1924 def islocal(path):
1926 1925 return True
@@ -1,697 +1,697 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util, scmutil
10 10 import array, struct
11 11
12 12 propertycache = util.propertycache
13 13
14 14 def _parse(data):
15 15 """Generates (path, node, flags) tuples from a manifest text"""
16 16 # This method does a little bit of excessive-looking
17 17 # precondition checking. This is so that the behavior of this
18 18 # class exactly matches its C counterpart, to try to help
19 19 # prevent surprise breakage for anyone who develops against
20 20 # the pure version.
21 21 if data and data[-1] != '\n':
22 22 raise ValueError('Manifest did not end in a newline.')
23 23 prev = None
24 24 for l in data.splitlines():
25 25 if prev is not None and prev > l:
26 26 raise ValueError('Manifest lines not in sorted order.')
27 27 prev = l
28 28 f, n = l.split('\0')
29 29 if len(n) > 40:
30 30 yield f, revlog.bin(n[:40]), n[40:]
31 31 else:
32 32 yield f, revlog.bin(n), ''
33 33
34 34 def _text(it):
35 35 """Given an iterator over (path, node, flags) tuples, returns a manifest
36 36 text"""
37 37 files = []
38 38 lines = []
39 39 _hex = revlog.hex
40 40 for f, n, fl in it:
41 41 files.append(f)
42 42 # if this is changed to support newlines in filenames,
43 43 # be sure to check the templates/ dir again (especially *-raw.tmpl)
44 44 lines.append("%s\0%s%s\n" % (f, _hex(n), fl))
45 45
46 46 _checkforbidden(files)
47 47 return ''.join(lines)
48 48
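The v1 manifest text handled by _parse() and _text() is one sorted line per file: the path, a NUL byte, the 40-hex-digit node, and an optional flag character (e.g. 'l' or 'x'). A tiny round trip (nodes hypothetical):

    data = ("bar/baz\0" + "ab" * 20 + "x\n" +
            "foo\0" + "cd" * 20 + "\n")
    entries = list(_parse(data))
    # [('bar/baz', '\xab' * 20, 'x'), ('foo', '\xcd' * 20, '')]
    assert _text(iter(entries)) == data
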
49 49 class _lazymanifest(dict):
50 50 """This is the pure implementation of lazymanifest.
51 51
52 52 It has not been optimized *at all* and is not lazy.
53 53 """
54 54
55 55 def __init__(self, data):
56 56 dict.__init__(self)
57 57 for f, n, fl in _parse(data):
58 58 self[f] = n, fl
59 59
60 60 def __setitem__(self, k, v):
61 61 node, flag = v
62 62 assert node is not None
63 63 if len(node) > 21:
64 64 node = node[:21] # match c implementation behavior
65 65 dict.__setitem__(self, k, (node, flag))
66 66
67 67 def __iter__(self):
68 68 return iter(sorted(dict.keys(self)))
69 69
70 70 def iterkeys(self):
71 71 return iter(sorted(dict.keys(self)))
72 72
73 73 def iterentries(self):
74 74 return ((f, e[0], e[1]) for f, e in sorted(self.iteritems()))
75 75
76 76 def copy(self):
77 77 c = _lazymanifest('')
78 78 c.update(self)
79 79 return c
80 80
81 81 def diff(self, m2, clean=False):
82 82 '''Finds changes between the current manifest and m2.'''
83 83 diff = {}
84 84
85 85 for fn, e1 in self.iteritems():
86 86 if fn not in m2:
87 87 diff[fn] = e1, (None, '')
88 88 else:
89 89 e2 = m2[fn]
90 90 if e1 != e2:
91 91 diff[fn] = e1, e2
92 92 elif clean:
93 93 diff[fn] = None
94 94
95 95 for fn, e2 in m2.iteritems():
96 96 if fn not in self:
97 97 diff[fn] = (None, ''), e2
98 98
99 99 return diff
100 100
101 101 def filtercopy(self, filterfn):
102 102 c = _lazymanifest('')
103 103 for f, n, fl in self.iterentries():
104 104 if filterfn(f):
105 105 c[f] = n, fl
106 106 return c
107 107
108 108 def text(self):
109 109 """Get the full data of this manifest as a bytestring."""
110 110 return _text(self.iterentries())
111 111
112 112 try:
113 113 _lazymanifest = parsers.lazymanifest
114 114 except AttributeError:
115 115 pass
116 116
117 117 class manifestdict(object):
118 118 def __init__(self, data=''):
119 119 self._lm = _lazymanifest(data)
120 120
121 121 def __getitem__(self, key):
122 122 return self._lm[key][0]
123 123
124 124 def find(self, key):
125 125 return self._lm[key]
126 126
127 127 def __len__(self):
128 128 return len(self._lm)
129 129
130 130 def __setitem__(self, key, node):
131 131 self._lm[key] = node, self.flags(key, '')
132 132
133 133 def __contains__(self, key):
134 134 return key in self._lm
135 135
136 136 def __delitem__(self, key):
137 137 del self._lm[key]
138 138
139 139 def __iter__(self):
140 140 return self._lm.__iter__()
141 141
142 142 def iterkeys(self):
143 143 return self._lm.iterkeys()
144 144
145 145 def keys(self):
146 146 return list(self.iterkeys())
147 147
148 148 def _intersectfiles(self, files):
149 149 '''make a new lazymanifest with the intersection of self with files
150 150
151 151 The algorithm assumes that files is much smaller than self.'''
152 152 ret = manifestdict()
153 153 lm = self._lm
154 154 for fn in files:
155 155 if fn in lm:
156 156 ret._lm[fn] = self._lm[fn]
157 157 return ret
158 158
159 159 def filesnotin(self, m2):
160 160 '''Set of files in this manifest that are not in the other'''
161 161 files = set(self)
162 162 files.difference_update(m2)
163 163 return files
164 164
165 165 @propertycache
166 166 def _dirs(self):
167 167 return scmutil.dirs(self)
168 168
169 169 def dirs(self):
170 170 return self._dirs
171 171
172 172 def hasdir(self, dir):
173 173 return dir in self._dirs
174 174
175 175 def matches(self, match):
176 176 '''generate a new manifest filtered by the match argument'''
177 177 if match.always():
178 178 return self.copy()
179 179
180 180 files = match.files()
181 181 if (len(files) < 100 and (match.isexact() or
182 182 (not match.anypats() and util.all(fn in self for fn in files)))):
183 183 return self._intersectfiles(files)
184 184
185 185 lm = manifestdict('')
186 186 lm._lm = self._lm.filtercopy(match)
187 187 return lm
188 188
189 189 def diff(self, m2, clean=False):
190 190 '''Finds changes between the current manifest and m2.
191 191
192 192 Args:
193 193 m2: the manifest to which this manifest should be compared.
194 194 clean: if true, include files unchanged between these manifests
195 195 with a None value in the returned dictionary.
196 196
197 197 The result is returned as a dict with filename as key and
198 198 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
199 199 nodeid in the current/other manifest and fl1/fl2 is the flag
200 200 in the current/other manifest. Where the file does not exist,
201 201 the nodeid will be None and the flags will be the empty
202 202 string.
203 203 '''
204 204 return self._lm.diff(m2._lm, clean)
205 205
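A small illustration of the documented return shape (nodes hypothetical):

    m1 = manifestdict("a\0" + "11" * 20 + "\n" + "b\0" + "22" * 20 + "\n")
    m2 = manifestdict("b\0" + "33" * 20 + "\n")
    m1.diff(m2)
    # {'a': (('\x11' * 20, ''), (None, '')),
    #  'b': (('\x22' * 20, ''), ('\x33' * 20, ''))}
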
206 206 def setflag(self, key, flag):
207 207 self._lm[key] = self[key], flag
208 208
209 209 def get(self, key, default=None):
210 210 try:
211 211 return self._lm[key][0]
212 212 except KeyError:
213 213 return default
214 214
215 215 def flags(self, key, default=''):
216 216 try:
217 217 return self._lm[key][1]
218 218 except KeyError:
219 219 return default
220 220
221 221 def copy(self):
222 222 c = manifestdict('')
223 223 c._lm = self._lm.copy()
224 224 return c
225 225
226 226 def iteritems(self):
227 227 return (x[:2] for x in self._lm.iterentries())
228 228
229 229 def text(self):
230 230 return self._lm.text()
231 231
232 232 def fastdelta(self, base, changes):
233 233 """Given a base manifest text as an array.array and a list of changes
234 234 relative to that text, compute a delta that can be used by revlog.
235 235 """
236 236 delta = []
237 237 dstart = None
238 238 dend = None
239 239 dline = [""]
240 240 start = 0
241 241 # zero copy representation of base as a buffer
242 242 addbuf = util.buffer(base)
243 243
244 244 # start with a read-only loop that finds the offset of
245 245 # each line and creates the deltas
246 246 for f, todelete in changes:
247 247 # bs will either be the index of the item or the insert point
248 248 start, end = _msearch(addbuf, f, start)
249 249 if not todelete:
250 250 h, fl = self._lm[f]
251 251 l = "%s\0%s%s\n" % (f, revlog.hex(h), fl)
252 252 else:
253 253 if start == end:
254 254 # item we want to delete was not found, error out
255 255 raise AssertionError(
256 256 _("failed to remove %s from manifest") % f)
257 257 l = ""
258 258 if dstart is not None and dstart <= start and dend >= start:
259 259 if dend < end:
260 260 dend = end
261 261 if l:
262 262 dline.append(l)
263 263 else:
264 264 if dstart is not None:
265 265 delta.append([dstart, dend, "".join(dline)])
266 266 dstart = start
267 267 dend = end
268 268 dline = [l]
269 269
270 270 if dstart is not None:
271 271 delta.append([dstart, dend, "".join(dline)])
272 272 # apply the delta to the base, and get a delta for addrevision
273 273 deltatext, arraytext = _addlistdelta(base, delta)
274 274 return arraytext, deltatext
275 275
276 276 def _msearch(m, s, lo=0, hi=None):
277 277 '''return a tuple (start, end) that says where to find s within m.
278 278
279 279 If the string is found, m[start:end] is the line containing
280 280 that string. If start == end, the string was not found and
281 281 they indicate the proper sorted insertion point.
282 282
283 283 m should be a buffer or a string
284 284 s is a string'''
285 285 def advance(i, c):
286 286 while i < lenm and m[i] != c:
287 287 i += 1
288 288 return i
289 289 if not s:
290 290 return (lo, lo)
291 291 lenm = len(m)
292 292 if not hi:
293 293 hi = lenm
294 294 while lo < hi:
295 295 mid = (lo + hi) // 2
296 296 start = mid
297 297 while start > 0 and m[start - 1] != '\n':
298 298 start -= 1
299 299 end = advance(start, '\0')
300 300 if m[start:end] < s:
301 301 # we know that after the null there are 40 bytes of sha1
302 302 # this translates to the bisect lo = mid + 1
303 303 lo = advance(end + 40, '\n') + 1
304 304 else:
305 305 # this translates to the bisect hi = mid
306 306 hi = start
307 307 end = advance(lo, '\0')
308 308 found = m[lo:end]
309 309 if s == found:
310 310 # we know that after the null there are 40 bytes of sha1
311 311 end = advance(end + 40, '\n')
312 312 return (lo, end + 1)
313 313 else:
314 314 return (lo, lo)
315 315
316 316 def _checkforbidden(l):
317 317 """Check filenames for illegal characters."""
318 318 for f in l:
319 319 if '\n' in f or '\r' in f:
320 320 raise error.RevlogError(
321 321 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
322 322
323 323
324 324 # apply the changes collected during the bisect loop to our addlist
325 325 # return a delta suitable for addrevision
326 326 def _addlistdelta(addlist, x):
327 327 # for large addlist arrays, building a new array is cheaper
328 328 # than repeatedly modifying the existing one
329 329 currentposition = 0
330 330 newaddlist = array.array('c')
331 331
332 332 for start, end, content in x:
333 333 newaddlist += addlist[currentposition:start]
334 334 if content:
335 335 newaddlist += array.array('c', content)
336 336
337 337 currentposition = end
338 338
339 339 newaddlist += addlist[currentposition:]
340 340
341 341 deltatext = "".join(struct.pack(">lll", start, end, len(content))
342 342 + content for start, end, content in x)
343 343 return deltatext, newaddlist
344 344
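The deltatext built above is a flat sequence of records, each a big-endian (start, end, length) header followed by that many replacement bytes. A hedged decoder sketch (struct is imported at the top of this module):

    def decode_delta(deltatext):
        pos, records = 0, []
        while pos < len(deltatext):
            start, end, length = struct.unpack(">lll", deltatext[pos:pos + 12])
            records.append((start, end, deltatext[pos + 12:pos + 12 + length]))
            pos += 12 + length
        return records
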
345 345 def _splittopdir(f):
346 346 if '/' in f:
347 347 dir, subpath = f.split('/', 1)
348 348 return dir + '/', subpath
349 349 else:
350 350 return '', f
351 351
352 352 class treemanifest(object):
353 353 def __init__(self, dir='', text=''):
354 354 self._dir = dir
355 355 self._dirs = {}
356 356 # Using _lazymanifest here is a little slower than plain old dicts
357 357 self._files = {}
358 358 self._flags = {}
359 359 for f, n, fl in _parse(text):
360 360 self[f] = n
361 361 if fl:
362 362 self.setflag(f, fl)
363 363
364 364 def _subpath(self, path):
365 365 return self._dir + path
366 366
367 367 def __len__(self):
368 368 size = len(self._files)
369 369 for m in self._dirs.values():
370 370 size += m.__len__()
371 371 return size
372 372
373 373 def _isempty(self):
374 374 return (not self._files and (not self._dirs or
375 375 util.all(m._isempty() for m in self._dirs.values())))
376 376
377 377 def __str__(self):
378 378 return '<treemanifest dir=%s>' % self._dir
379 379
380 380 def iteritems(self):
381 381 for p, n in sorted(self._dirs.items() + self._files.items()):
382 382 if p in self._files:
383 383 yield self._subpath(p), n
384 384 else:
385 385 for f, sn in n.iteritems():
386 386 yield f, sn
387 387
388 388 def iterkeys(self):
389 389 for p in sorted(self._dirs.keys() + self._files.keys()):
390 390 if p in self._files:
391 391 yield self._subpath(p)
392 392 else:
393 393 for f in self._dirs[p].iterkeys():
394 394 yield f
395 395
396 396 def keys(self):
397 397 return list(self.iterkeys())
398 398
399 399 def __iter__(self):
400 400 return self.iterkeys()
401 401
402 402 def __contains__(self, f):
403 403 if f is None:
404 404 return False
405 405 dir, subpath = _splittopdir(f)
406 406 if dir:
407 407 if dir not in self._dirs:
408 408 return False
409 409 return self._dirs[dir].__contains__(subpath)
410 410 else:
411 411 return f in self._files
412 412
413 413 def get(self, f, default=None):
414 414 dir, subpath = _splittopdir(f)
415 415 if dir:
416 416 if dir not in self._dirs:
417 417 return default
418 418 return self._dirs[dir].get(subpath, default)
419 419 else:
420 420 return self._files.get(f, default)
421 421
422 422 def __getitem__(self, f):
423 423 dir, subpath = _splittopdir(f)
424 424 if dir:
425 425 return self._dirs[dir].__getitem__(subpath)
426 426 else:
427 427 return self._files[f]
428 428
429 429 def flags(self, f):
430 430 dir, subpath = _splittopdir(f)
431 431 if dir:
432 432 if dir not in self._dirs:
433 433 return ''
434 434 return self._dirs[dir].flags(subpath)
435 435 else:
436 436 if f in self._dirs:
437 437 return ''
438 438 return self._flags.get(f, '')
439 439
440 440 def find(self, f):
441 441 dir, subpath = _splittopdir(f)
442 442 if dir:
443 443 return self._dirs[dir].find(subpath)
444 444 else:
445 445 return self._files[f], self._flags.get(f, '')
446 446
447 447 def __delitem__(self, f):
448 448 dir, subpath = _splittopdir(f)
449 449 if dir:
450 450 self._dirs[dir].__delitem__(subpath)
451 451 # If the directory is now empty, remove it
452 452 if self._dirs[dir]._isempty():
453 453 del self._dirs[dir]
454 454 else:
455 455 del self._files[f]
456 456 if f in self._flags:
457 457 del self._flags[f]
458 458
459 459 def __setitem__(self, f, n):
460 460 assert n is not None
461 461 dir, subpath = _splittopdir(f)
462 462 if dir:
463 463 if dir not in self._dirs:
464 464 self._dirs[dir] = treemanifest(self._subpath(dir))
465 465 self._dirs[dir].__setitem__(subpath, n)
466 466 else:
467 467 self._files[f] = n[:21] # to match manifestdict's behavior
468 468
469 469 def setflag(self, f, flags):
470 470 """Set the flags (symlink, executable) for path f."""
471 471 dir, subpath = _splittopdir(f)
472 472 if dir:
473 473 if dir not in self._dirs:
474 474 self._dirs[dir] = treemanifest(self._subpath(dir))
475 475 self._dirs[dir].setflag(subpath, flags)
476 476 else:
477 477 self._flags[f] = flags
478 478
479 479 def copy(self):
480 480 copy = treemanifest(self._dir)
481 481 for d in self._dirs:
482 482 copy._dirs[d] = self._dirs[d].copy()
483 483 copy._files = dict.copy(self._files)
484 484 copy._flags = dict.copy(self._flags)
485 485 return copy
486 486
487 487 def filesnotin(self, m2):
488 488 '''Set of files in this manifest that are not in the other'''
489 489 files = set()
490 490 def _filesnotin(t1, t2):
491 491 for d, m1 in t1._dirs.iteritems():
492 492 if d in t2._dirs:
493 493 m2 = t2._dirs[d]
494 494 _filesnotin(m1, m2)
495 495 else:
496 496 files.update(m1.iterkeys())
497 497
498 498 for fn in t1._files.iterkeys():
499 499 if fn not in t2._files:
500 500 files.add(t1._subpath(fn))
501 501
502 502 _filesnotin(self, m2)
503 503 return files
504 504
505 505 @propertycache
506 506 def _alldirs(self):
507 507 return scmutil.dirs(self)
508 508
509 509 def dirs(self):
510 510 return self._alldirs
511 511
512 512 def hasdir(self, dir):
513 513 topdir, subdir = _splittopdir(dir)
514 514 if topdir:
515 515 if topdir in self._dirs:
516 516 return self._dirs[topdir].hasdir(subdir)
517 517 return False
518 518 return (dir + '/') in self._dirs
519 519
520 520 def matches(self, match):
521 521 '''generate a new manifest filtered by the match argument'''
522 522 if match.always():
523 523 return self.copy()
524 524
525 525 return self._matches(match)
526 526
527 527 def _matches(self, match):
528 528 '''recursively generate a new manifest filtered by the match argument.
529 529 '''
530 530
531 531 ret = treemanifest(self._dir)
532 532
533 533 for fn in self._files:
534 534 fullp = self._subpath(fn)
535 535 if not match(fullp):
536 536 continue
537 537 ret._files[fn] = self._files[fn]
538 538 if fn in self._flags:
539 539 ret._flags[fn] = self._flags[fn]
540 540
541 541 for dir, subm in self._dirs.iteritems():
542 542 m = subm._matches(match)
543 543 if not m._isempty():
544 544 ret._dirs[dir] = m
545 545
546 546 return ret
547 547
548 548 def diff(self, m2, clean=False):
549 549 '''Finds changes between the current manifest and m2.
550 550
551 551 Args:
552 552 m2: the manifest to which this manifest should be compared.
553 553 clean: if true, include files unchanged between these manifests
554 554 with a None value in the returned dictionary.
555 555
556 556 The result is returned as a dict with filename as key and
557 557 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
558 558 nodeid in the current/other manifest and fl1/fl2 is the flag
559 559 in the current/other manifest. Where the file does not exist,
560 560 the nodeid will be None and the flags will be the empty
561 561 string.
562 562 '''
563 563 result = {}
564 564 emptytree = treemanifest()
565 565 def _diff(t1, t2):
566 566 for d, m1 in t1._dirs.iteritems():
567 567 m2 = t2._dirs.get(d, emptytree)
568 568 _diff(m1, m2)
569 569
570 570 for d, m2 in t2._dirs.iteritems():
571 571 if d not in t1._dirs:
572 572 _diff(emptytree, m2)
573 573
574 574 for fn, n1 in t1._files.iteritems():
575 575 fl1 = t1._flags.get(fn, '')
576 576 n2 = t2._files.get(fn, None)
577 577 fl2 = t2._flags.get(fn, '')
578 578 if n1 != n2 or fl1 != fl2:
579 579 result[t1._subpath(fn)] = ((n1, fl1), (n2, fl2))
580 580 elif clean:
581 581 result[t1._subpath(fn)] = None
582 582
583 583 for fn, n2 in t2._files.iteritems():
584 584 if fn not in t1._files:
585 585 fl2 = t2._flags.get(fn, '')
586 586 result[t2._subpath(fn)] = ((None, ''), (n2, fl2))
587 587
588 588 _diff(self, m2)
589 589 return result
590 590
591 591 def text(self):
592 592 """Get the full data of this manifest as a bytestring."""
593 593 flags = self.flags
594 594 return _text((f, self[f], flags(f)) for f in self.keys())
595 595
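A brief usage sketch (nodes hypothetical): treemanifest splits each path on its first '/', storing every directory level as a nested treemanifest while exposing the same flat mapping interface as manifestdict:

    tm = treemanifest()
    tm['dir/file'] = '\x11' * 20
    tm.setflag('dir/file', 'x')
    tm['top'] = '\x22' * 20
    tm.keys()             # ['dir/file', 'top']
    tm.flags('dir/file')  # 'x'
    tm.hasdir('dir')      # True
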
596 596 class manifest(revlog.revlog):
597 597 def __init__(self, opener):
598 598 # During normal operations, we expect to deal with no more than four
599 599 # revs at a time (such as during commit --amend). When rebasing large
600 600 # stacks of commits, the number can go up, hence the config knob below.
601 601 cachesize = 4
602 602 usetreemanifest = False
603 603 usemanifestv2 = False
604 604 opts = getattr(opener, 'options', None)
605 605 if opts is not None:
606 606 cachesize = opts.get('manifestcachesize', cachesize)
607 607 usetreemanifest = opts.get('usetreemanifest', usetreemanifest)
608 usemanifestv2 = opts.get('usemanifestv2', usemanifestv2)
608 usemanifestv2 = opts.get('manifestv2', usemanifestv2)
609 609 self._mancache = util.lrucachedict(cachesize)
610 610 revlog.revlog.__init__(self, opener, "00manifest.i")
611 611 self._usetreemanifest = usetreemanifest
612 612 self._usemanifestv2 = usemanifestv2
613 613
614 614 def _newmanifest(self, data=''):
615 615 if self._usetreemanifest:
616 616 return treemanifest('', data)
617 617 return manifestdict(data)
618 618
619 619 def _slowreaddelta(self, node):
620 620 r0 = self.deltaparent(self.rev(node))
621 621 m0 = self.read(self.node(r0))
622 622 m1 = self.read(node)
623 623 md = self._newmanifest()
624 624 for f, ((n0, fl0), (n1, fl1)) in m0.diff(m1).iteritems():
625 625 if n1:
626 626 md[f] = n1
627 627 if fl1:
628 628 md.setflag(f, fl1)
629 629 return md
630 630
631 631 def readdelta(self, node):
632 632 if self._usemanifestv2:
633 633 return self._slowreaddelta(node)
634 634 r = self.rev(node)
635 635 d = mdiff.patchtext(self.revdiff(self.deltaparent(r), r))
636 636 return self._newmanifest(d)
637 637
638 638 def readfast(self, node):
639 639 '''use the faster of readdelta or read'''
640 640 r = self.rev(node)
641 641 deltaparent = self.deltaparent(r)
642 642 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
643 643 return self.readdelta(node)
644 644 return self.read(node)
645 645
646 646 def read(self, node):
647 647 if node == revlog.nullid:
648 648 return self._newmanifest() # don't upset local cache
649 649 if node in self._mancache:
650 650 return self._mancache[node][0]
651 651 text = self.revision(node)
652 652 arraytext = array.array('c', text)
653 653 m = self._newmanifest(text)
654 654 self._mancache[node] = (m, arraytext)
655 655 return m
656 656
657 657 def find(self, node, f):
658 658 '''look up entry for a single file efficiently.
659 659 return (node, flags) pair if found, (None, None) if not.'''
660 660 m = self.read(node)
661 661 try:
662 662 return m.find(f)
663 663 except KeyError:
664 664 return None, None
665 665
666 666 def add(self, m, transaction, link, p1, p2, added, removed):
667 667 if (p1 in self._mancache and not self._usetreemanifest
668 668 and not self._usemanifestv2):
669 669 # If our first parent is in the manifest cache, we can
670 670 # compute a delta here using properties we know about the
671 671 # manifest up-front, which may save time later for the
672 672 # revlog layer.
673 673
674 674 _checkforbidden(added)
675 675 # combine the changed lists into one list for sorting
676 676 work = [(x, False) for x in added]
677 677 work.extend((x, True) for x in removed)
678 678 # this could use heapq.merge() (from Python 2.6+) or equivalent
679 679 # since the lists are already sorted
680 680 work.sort()
681 681
682 682 arraytext, deltatext = m.fastdelta(self._mancache[p1][1], work)
683 683 cachedelta = self.rev(p1), deltatext
684 684 text = util.buffer(arraytext)
685 685 else:
686 686 # The first parent manifest isn't already loaded, so we'll
687 687 # just encode a fulltext of the manifest and pass that
688 688 # through to the revlog layer, and let it handle the delta
689 689 # process.
690 690 text = m.text()
691 691 arraytext = array.array('c', text)
692 692 cachedelta = None
693 693
694 694 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
695 695 self._mancache[n] = (m, arraytext)
696 696
697 697 return n