##// END OF EJS Templates
manifest: make lru size configurable...
Durham Goode -
r24033:ed5e8a95 default
parent child Browse files
Show More
@@ -1,1853 +1,1856 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 import namespaces
# Short local aliases for the caching decorators used throughout this module.
propertycache = util.propertycache
filecache = scmutil.filecache
24 24
class repofilecache(filecache):
    """All filecache usage on repo are done for logic that should be unfiltered
    """

    # Every accessor redirects to the unfiltered repo so that the cached
    # value is shared by all filtered views of the same repository.
    def __get__(self, repo, type=None):
        return super(repofilecache, self).__get__(repo.unfiltered(), type)
    def __set__(self, repo, value):
        return super(repofilecache, self).__set__(repo.unfiltered(), value)
    def __delete__(self, repo):
        return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
class storecache(repofilecache):
    """filecache for files in the store"""
    def join(self, obj, fname):
        # resolve fname relative to the store directory (.hg/store)
        return obj.sjoin(fname)
40 40
class unfilteredpropertycache(propertycache):
    """propertycache that apply to unfiltered repo only"""

    def __get__(self, repo, type=None):
        unfi = repo.unfiltered()
        if unfi is repo:
            # accessed on the unfiltered repo itself: compute and cache here
            return super(unfilteredpropertycache, self).__get__(unfi)
        # accessed through a filtered view: delegate to the unfiltered repo
        # so only a single cached value exists
        return getattr(unfi, self.name)
49 49
class filteredpropertycache(propertycache):
    """propertycache that must take filtering in account"""

    def cachevalue(self, obj, value):
        # store the value on the (possibly filtered) instance itself, so
        # each filtered view keeps its own cached copy
        object.__setattr__(obj, self.name, value)
56 56
def hasunfilteredcache(repo, name):
    """check if a repo has an unfilteredpropertycache value for <name>"""
    unfi = repo.unfiltered()
    return name in vars(unfi)
60 60
def unfilteredmethod(orig):
    """decorate method that always need to be run on unfiltered version"""
    def wrapper(repo, *args, **kwargs):
        # swap the (possibly filtered) receiver for its unfiltered twin
        unfi = repo.unfiltered()
        return orig(unfi, *args, **kwargs)
    return wrapper
66 66
# Capabilities advertised by modern local peers; legacy peers additionally
# support the pre-getbundle 'changegroupsubset' protocol command.
moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
                  'unbundle'))
legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
class localpeer(peer.peerrepository):
    '''peer for a local repo; reflects only the most recent API'''

    def __init__(self, repo, caps=moderncaps):
        # note: 'caps' defaults to a shared module-level set; it is read-only
        # here, so the shared default is safe
        peer.peerrepository.__init__(self)
        # only the 'served' view of the repo is exposed through this peer
        self._repo = repo.filtered('served')
        self.ui = repo.ui
        self._caps = repo._restrictcapabilities(caps)
        self.requirements = repo.requirements
        self.supportedformats = repo.supportedformats

    def close(self):
        self._repo.close()

    def _capabilities(self):
        return self._caps

    def local(self):
        # a local peer can hand out the underlying repo object
        return self._repo

    def canpush(self):
        return True

    def url(self):
        return self._repo.url()

    def lookup(self, key):
        return self._repo.lookup(key)

    def branchmap(self):
        return self._repo.branchmap()

    def heads(self):
        return self._repo.heads()

    def known(self, nodes):
        return self._repo.known(nodes)

    def getbundle(self, source, heads=None, common=None, bundlecaps=None,
                  format='HG10', **kwargs):
        cg = exchange.getbundle(self._repo, source, heads=heads,
                                common=common, bundlecaps=bundlecaps, **kwargs)
        if bundlecaps is not None and 'HG2Y' in bundlecaps:
            # When requesting a bundle2, getbundle returns a stream to make the
            # wire level function happier. We need to build a proper object
            # from it in local peer.
            cg = bundle2.unbundle20(self.ui, cg)
        return cg

    # TODO We might want to move the next two calls into legacypeer and add
    # unbundle instead.

    def unbundle(self, cg, heads, url):
        """apply a bundle on a repo

        This function handles the repo locking itself."""
        try:
            cg = exchange.readbundle(self.ui, cg, None)
            ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
            if util.safehasattr(ret, 'getchunks'):
                # This is a bundle20 object, turn it into an unbundler.
                # This little dance should be dropped eventually when the API
                # is finally improved.
                stream = util.chunkbuffer(ret.getchunks())
                ret = bundle2.unbundle20(self.ui, stream)
            return ret
        except error.PushRaced, exc:
            # surface push races as a protocol-level response error
            raise error.ResponseError(_('push failed:'), str(exc))

    def lock(self):
        return self._repo.lock()

    def addchangegroup(self, cg, source, url):
        return changegroup.addchangegroup(self._repo, cg, source, url)

    def pushkey(self, namespace, key, old, new):
        return self._repo.pushkey(namespace, key, old, new)

    def listkeys(self, namespace):
        return self._repo.listkeys(namespace)

    def debugwireargs(self, one, two, three=None, four=None, five=None):
        '''used to test argument passing over the wire'''
        return "%s %s %s %s %s" % (one, two, three, four, five)
155 155
class locallegacypeer(localpeer):
    '''peer extension which implements legacy methods too; used for tests with
    restricted capabilities'''

    def __init__(self, repo):
        localpeer.__init__(self, repo, caps=legacycaps)

    # The methods below implement the pre-getbundle wire protocol by
    # delegating straight to the underlying repository.
    def branches(self, nodes):
        return self._repo.branches(nodes)

    def between(self, pairs):
        return self._repo.between(pairs)

    def changegroup(self, basenodes, source):
        return changegroup.changegroup(self._repo, basenodes, source)

    def changegroupsubset(self, bases, heads, source):
        return changegroup.changegroupsubset(self._repo, bases, heads, source)
174 174
175 175 class localrepository(object):
176 176
    # repository format features this class can read/write
    supportedformats = set(('revlogv1', 'generaldelta'))
    _basesupported = supportedformats | set(('store', 'fncache', 'shared',
                                             'dotencode'))
    # requirements that translate into revlog opener options
    openerreqs = set(('revlogv1', 'generaldelta'))
    requirements = ['revlogv1']
    filtername = None

    # a list of (ui, featureset) functions.
    # only functions defined in module of enabled extensions are invoked
    featuresetupfuncs = set()

    def _baserequirements(self, create):
        # Return a fresh copy so callers may append to it without mutating
        # the class-level default.  'create' is unused here but kept for
        # subclass/extension overrides.
        return self.requirements[:]
190 190
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at *path*.

        Raises error.RepoError when the repo is missing (create=False) or
        already exists (create=True).
        """
        # working-directory and .hg vfs setup
        self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
        self.wopener = self.wvfs
        self.root = self.wvfs.base
        self.path = self.wvfs.join(".hg")
        self.origroot = path
        self.auditor = pathutil.pathauditor(self.root, self._checknested)
        self.vfs = scmutil.vfs(self.path)
        self.opener = self.vfs
        self.baseui = baseui
        self.ui = baseui.copy()
        self.ui.copy = baseui.copy # prevent copying repo configuration
        # A list of callback to shape the phase if no data were found.
        # Callback are in the form: func(repo, roots) --> processed root.
        # This list it to be filled by extension during repo setup
        self._phasedefaults = []
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # no per-repo hgrc; proceed with the base configuration
            pass

        if self.featuresetupfuncs:
            self.supported = set(self._basesupported) # use private copy
            extmods = set(m.__name__ for n, m
                          in extensions.extensions(self.ui))
            for setupfunc in self.featuresetupfuncs:
                if setupfunc.__module__ in extmods:
                    setupfunc(self.ui, self.supported)
        else:
            self.supported = self._basesupported

        if not self.vfs.isdir():
            if create:
                # initialize a brand new repository on disk
                if not self.wvfs.exists():
                    self.wvfs.makedirs()
                self.vfs.makedir(notindexed=True)
                requirements = self._baserequirements(create)
                if self.ui.configbool('format', 'usestore', True):
                    self.vfs.mkdir("store")
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.vfs.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
                requirements = set(requirements)
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            try:
                requirements = scmutil.readrequires(self.vfs, self.supported)
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                # missing requires file: treat as an empty requirement set
                requirements = set()

        # honor a share: .hg/sharedpath redirects the store elsewhere
        self.sharedpath = self.path
        try:
            vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
                              realpath=True)
            s = vfs.base
            if not vfs.exists():
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
        self.spath = self.store.path
        self.svfs = self.store.vfs
        self.sopener = self.svfs
        self.sjoin = self.store.join
        self.vfs.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()


        self._branchcaches = {}
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

        # A cache for various files under .hg/ that tracks file changes,
        # (used by the filecache decorator)
        #
        # Maps a property name to its util.filecacheentry
        self._filecache = {}

        # hold sets of revision to be filtered
        # should be cleared when something might have changed the filter value:
        # - new changesets,
        # - phase change,
        # - new obsolescence marker,
        # - working directory parent change,
        # - bookmark changes
        self.filteredrevcache = {}

        # generic mapping between names and nodes
        self.names = namespaces.namespaces()
303 303
    def close(self):
        # nothing to release for a plain local repository
        pass

    def _restrictcapabilities(self, caps):
        # bundle2 is not ready for prime time, drop it unless explicitly
        # required by the tests (or some brave tester)
        if self.ui.configbool('experimental', 'bundle2-exp', False):
            caps = set(caps)
            capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
            caps.add('bundle2-exp=' + urllib.quote(capsblob))
        return caps
315 315
316 316 def _applyrequirements(self, requirements):
317 317 self.requirements = requirements
318 318 self.svfs.options = dict((r, 1) for r in requirements
319 319 if r in self.openerreqs)
320 320 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
321 321 if chunkcachesize is not None:
322 322 self.svfs.options['chunkcachesize'] = chunkcachesize
323 323 maxchainlen = self.ui.configint('format', 'maxchainlen')
324 324 if maxchainlen is not None:
325 325 self.svfs.options['maxchainlen'] = maxchainlen
326 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
327 if manifestcachesize is not None:
328 self.svfs.options['manifestcachesize'] = manifestcachesize
326 329
327 330 def _writerequirements(self):
328 331 reqfile = self.vfs("requires", "w")
329 332 for r in sorted(self.requirements):
330 333 reqfile.write("%s\n" % r)
331 334 reqfile.close()
332 335
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        *path* is an absolute filesystem path; returns True only when it
        names a subrepository known to the working directory context.
        """
        if not path.startswith(self.root):
            return False
        subpath = path[len(self.root) + 1:]
        normsubpath = util.pconvert(subpath)

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = '/'.join(parts)
            if prefix in ctx.substate:
                if prefix == normsubpath:
                    return True
                else:
                    # path is inside a subrepo: ask that subrepo to decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                parts.pop()
        return False

    def peer(self):
        return localpeer(self) # not cached to avoid reference cycle

    def unfiltered(self):
        """Return unfiltered version of the repository

        Intended to be overwritten by filtered repo."""
        return self

    def filtered(self, name):
        """Return a filtered version of a repository"""
        # build a new class with the mixin and the current class
        # (possibly subclass of the repo)
        class proxycls(repoview.repoview, self.unfiltered().__class__):
            pass
        return proxycls(self, name)

    @repofilecache('bookmarks')
    def _bookmarks(self):
        # bookmark name -> node mapping; reloaded when .hg/bookmarks changes
        return bookmarks.bmstore(self)

    @repofilecache('bookmarks.current')
    def _bookmarkcurrent(self):
        # name of the active bookmark, or None
        return bookmarks.readcurrent(self)
395 398
396 399 def bookmarkheads(self, bookmark):
397 400 name = bookmark.split('@', 1)[0]
398 401 heads = []
399 402 for mark, n in self._bookmarks.iteritems():
400 403 if mark.split('@', 1)[0] == name:
401 404 heads.append(n)
402 405 return heads
403 406
    @storecache('phaseroots')
    def _phasecache(self):
        # phase data, invalidated when .hg/store/phaseroots changes
        return phases.phasecache(self, self._phasedefaults)

    @storecache('obsstore')
    def obsstore(self):
        # read default format for new obsstore.
        defaultformat = self.ui.configint('format', 'obsstore-version', None)
        # rely on obsstore class default when possible.
        kwargs = {}
        if defaultformat is not None:
            kwargs['defaultformat'] = defaultformat
        readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
        store = obsolete.obsstore(self.svfs, readonly=readonly,
                                  **kwargs)
        if store and readonly:
            # message is rare enough to not be translated
            msg = 'obsolete feature not enabled but %i markers found!\n'
            self.ui.warn(msg % len(list(store)))
        return store

    @storecache('00changelog.i')
    def changelog(self):
        c = changelog.changelog(self.svfs)
        if 'HG_PENDING' in os.environ:
            # a transaction is pending for this repo: expose its data too
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @storecache('00manifest.i')
    def manifest(self):
        return manifest.manifest(self.svfs)

    @repofilecache('dirstate')
    def dirstate(self):
        # list-wrapped flag so the closure below can mutate it; warn at most
        # once about an unknown working directory parent
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
453 456
    def __getitem__(self, changeid):
        # None denotes the working directory
        if changeid is None:
            return context.workingctx(self)
        if isinstance(changeid, slice):
            # a slice of revision numbers, skipping filtered revisions
            return [context.changectx(self, i)
                    for i in xrange(*changeid.indices(len(self)))
                    if i not in self.changelog.filteredrevs]
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the (possibly filtered) changelog
        return len(self.changelog)

    def __iter__(self):
        return iter(self.changelog)
477 480
    def revs(self, expr, *args):
        '''Return a list of revisions matching the given revset'''
        expr = revset.formatspec(expr, *args)
        m = revset.match(None, expr)
        return m(self, revset.spanset(self))

    def set(self, expr, *args):
        '''
        Yield a context for each matching revision, after doing arg
        replacement via revset.formatspec
        '''
        for r in self.revs(expr, *args):
            yield self[r]

    def url(self):
        # local repositories are identified by a file: URL
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Call a hook, passing this repo instance.

        This a convenience method to aid invoking hooks. Extensions likely
        won't call this unless they have registered a custom hook or are
        replacing code that is expected to call a hook.
        """
        return hook.hook(self.ui, self, name, throw, **args)
503 506
504 507 @unfilteredmethod
505 508 def _tag(self, names, node, message, local, user, date, extra={},
506 509 editor=False):
507 510 if isinstance(names, str):
508 511 names = (names,)
509 512
510 513 branches = self.branchmap()
511 514 for name in names:
512 515 self.hook('pretag', throw=True, node=hex(node), tag=name,
513 516 local=local)
514 517 if name in branches:
515 518 self.ui.warn(_("warning: tag %s conflicts with existing"
516 519 " branch name\n") % name)
517 520
518 521 def writetags(fp, names, munge, prevtags):
519 522 fp.seek(0, 2)
520 523 if prevtags and prevtags[-1] != '\n':
521 524 fp.write('\n')
522 525 for name in names:
523 526 m = munge and munge(name) or name
524 527 if (self._tagscache.tagtypes and
525 528 name in self._tagscache.tagtypes):
526 529 old = self.tags().get(name, nullid)
527 530 fp.write('%s %s\n' % (hex(old), m))
528 531 fp.write('%s %s\n' % (hex(node), m))
529 532 fp.close()
530 533
531 534 prevtags = ''
532 535 if local:
533 536 try:
534 537 fp = self.vfs('localtags', 'r+')
535 538 except IOError:
536 539 fp = self.vfs('localtags', 'a')
537 540 else:
538 541 prevtags = fp.read()
539 542
540 543 # local tags are stored in the current charset
541 544 writetags(fp, names, None, prevtags)
542 545 for name in names:
543 546 self.hook('tag', node=hex(node), tag=name, local=local)
544 547 return
545 548
546 549 try:
547 550 fp = self.wfile('.hgtags', 'rb+')
548 551 except IOError, e:
549 552 if e.errno != errno.ENOENT:
550 553 raise
551 554 fp = self.wfile('.hgtags', 'ab')
552 555 else:
553 556 prevtags = fp.read()
554 557
555 558 # committed tags are stored in UTF-8
556 559 writetags(fp, names, encoding.fromlocal, prevtags)
557 560
558 561 fp.close()
559 562
560 563 self.invalidatecaches()
561 564
562 565 if '.hgtags' not in self.dirstate:
563 566 self[None].add(['.hgtags'])
564 567
565 568 m = matchmod.exact(self.root, '', ['.hgtags'])
566 569 tagnode = self.commit(message, user, date, extra=extra, match=m,
567 570 editor=editor)
568 571
569 572 for name in names:
570 573 self.hook('tag', node=hex(node), tag=name, local=local)
571 574
572 575 return tagnode
573 576
    def tag(self, names, node, message, local, user, date, editor=False):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags has uncommitted modifications
            m = matchmod.exact(self.root, '', ['.hgtags'])
            if util.any(self.status(match=m, unknown=True, ignored=True)):
                raise util.Abort(_('working copy of .hgtags is changed'),
                                 hint=_('please commit .hgtags manually'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date, editor=editor)

    @filteredpropertycache
    def _tagscache(self):
        '''Returns a tagscache object that contains various tags related
        caches.'''

        # This simplifies its cache management by having one decorated
        # function (this one) and the rest simply fetch things from it.
        class tagscache(object):
            def __init__(self):
                # These two define the set of tags for this repository. tags
                # maps tag name to node; tagtypes maps tag name to 'global' or
                # 'local'. (Global tags are defined by .hgtags across all
                # heads, and local tags are defined in .hg/localtags.)
                # They constitute the in-memory cache of tags.
                self.tags = self.tagtypes = None

                self.nodetagscache = self.tagslist = None

        cache = tagscache()
        cache.tags, cache.tagtypes = self._findtags()

        return cache
626 629
627 630 def tags(self):
628 631 '''return a mapping of tag to node'''
629 632 t = {}
630 633 if self.changelog.filteredrevs:
631 634 tags, tt = self._findtags()
632 635 else:
633 636 tags = self._tagscache.tags
634 637 for k, v in tags.iteritems():
635 638 try:
636 639 # ignore tags to unknown nodes
637 640 self.changelog.rev(v)
638 641 t[k] = v
639 642 except (error.LookupError, ValueError):
640 643 pass
641 644 return t
642 645
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                tags[encoding.tolocal(name)] = node
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        return self._tagscache.tagtypes.get(tagname)

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        if not self._tagscache.tagslist:
            l = []
            for t, n in self.tags().iteritems():
                l.append((self.changelog.rev(n), t, n))
            # sort by revision, then drop the revision from the result
            self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]

        return self._tagscache.tagslist

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self._tagscache.nodetagscache:
            # lazily build the reverse (node -> tag names) mapping
            nodetagscache = {}
            for t, n in self._tagscache.tags.iteritems():
                nodetagscache.setdefault(n, []).append(t)
            for tags in nodetagscache.itervalues():
                tags.sort()
            self._tagscache.nodetagscache = nodetagscache
        return self._tagscache.nodetagscache.get(node, [])
707 710
708 711 def nodebookmarks(self, node):
709 712 marks = []
710 713 for bookmark, n in self._bookmarks.iteritems():
711 714 if n == node:
712 715 marks.append(bookmark)
713 716 return sorted(marks)
714 717
    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]} with branchheads
        ordered by increasing revision number'''
        branchmap.updatecache(self)
        return self._branchcaches[self.filtername]

    def branchtip(self, branch, ignoremissing=False):
        '''return the tip node for a given branch

        If ignoremissing is True, then this method will not raise an error.
        This is helpful for callers that only expect None for a missing branch
        (e.g. namespace).

        '''
        try:
            return self.branchmap().branchtip(branch)
        except KeyError:
            if not ignoremissing:
                raise error.RepoLookupError(_("unknown branch '%s'") % branch)
            else:
                # unknown branch: fall through and return None
                pass
736 739
    def lookup(self, key):
        # resolve any changeid (rev number, node, tag, bookmark, ...) to a
        # binary node
        return self[key].node()
739 742
740 743 def lookupbranch(self, key, remote=None):
741 744 repo = remote or self
742 745 if key in repo.branchmap():
743 746 return key
744 747
745 748 repo = (remote and remote.local()) and remote or self
746 749 return repo[key].branch()
747 750
748 751 def known(self, nodes):
749 752 nm = self.changelog.nodemap
750 753 pc = self._phasecache
751 754 result = []
752 755 for n in nodes:
753 756 r = nm.get(n)
754 757 resp = not (r is None or pc.phase(self, r) >= phases.secret)
755 758 result.append(resp)
756 759 return result
757 760
    def local(self):
        # this *is* a local repository (statichttprepo overrides this)
        return self

    def cancopy(self):
        # so statichttprepo's override of local() works
        if not self.local():
            return False
        if not self.ui.configbool('phases', 'publish', True):
            return True
        # if publishing we can't copy if there is filtered content
        return not self.filtered('visible').changelog.filteredrevs
769 772
770 773 def shared(self):
771 774 '''the type of shared repository (None if not shared)'''
772 775 if self.sharedpath != self.path:
773 776 return 'store'
774 777 return None
775 778
    def join(self, f, *insidef):
        # path of f inside the .hg directory
        return self.vfs.join(os.path.join(f, *insidef))

    def wjoin(self, f, *insidef):
        # path of f inside the working directory
        return self.wvfs.reljoin(self.root, f, *insidef)

    def file(self, f):
        # return the filelog for path f (leading '/' tolerated)
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.svfs, f)

    def changectx(self, changeid):
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def setparents(self, p1, p2=nullid):
        self.dirstate.beginparentchange()
        copies = self.dirstate.setparents(p1, p2)
        pctx = self[p1]
        if copies:
            # Adjust copy records, the dirstate cannot do it, it
            # requires access to parents manifests. Preserve them
            # only for entries added to first parent.
            for f in copies:
                if f not in pctx and copies[f] in pctx:
                    self.dirstate.copy(copies[f], f)
        if p2 == nullid:
            for f, s in sorted(self.dirstate.copies().items()):
                if f not in pctx and s not in pctx:
                    self.dirstate.copy(None, f)
        self.dirstate.endparentchange()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        # open a file from the working directory
        return self.wvfs(f, mode)

    def _link(self, f):
        # is working directory file f a symlink?
        return self.wvfs.islink(f)
827 830
828 831 def _loadfilter(self, filter):
829 832 if filter not in self.filterpats:
830 833 l = []
831 834 for pat, cmd in self.ui.configitems(filter):
832 835 if cmd == '!':
833 836 continue
834 837 mf = matchmod.match(self.root, '', [pat])
835 838 fn = None
836 839 params = cmd
837 840 for name, filterfn in self._datafilters.iteritems():
838 841 if cmd.startswith(name):
839 842 fn = filterfn
840 843 params = cmd[len(name):].lstrip()
841 844 break
842 845 if not fn:
843 846 fn = lambda s, c, **kwargs: util.filter(s, c)
844 847 # Wrap old filters not supporting keyword arguments
845 848 if not inspect.getargspec(fn)[2]:
846 849 oldfn = fn
847 850 fn = lambda s, c, **kwargs: oldfn(s, c)
848 851 l.append((mf, fn, params))
849 852 self.filterpats[filter] = l
850 853 return self.filterpats[filter]
851 854
    def _filter(self, filterpats, filename, data):
        # run data through the first filter whose pattern matches filename
        for mf, fn, cmd in filterpats:
            if mf(filename):
                self.ui.debug("filtering %s through %s\n" % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    @unfilteredpropertycache
    def _encodefilterpats(self):
        return self._loadfilter('encode')

    @unfilteredpropertycache
    def _decodefilterpats(self):
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        # register a named data filter usable from [encode]/[decode] config
        self._datafilters[name] = filter

    def wread(self, filename):
        # read a working directory file, applying encode filters
        if self._link(filename):
            data = self.wvfs.readlink(filename)
        else:
            data = self.wvfs.read(filename)
        return self._filter(self._encodefilterpats, filename, data)

    def wwrite(self, filename, data, flags):
        # write data to the working directory, applying decode filters and
        # the 'l' (symlink) / 'x' (executable) flags
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wvfs.symlink(data, filename)
        else:
            self.wvfs.write(filename, data)
            if 'x' in flags:
                self.wvfs.setflags(filename, False, True)

    def wwritedata(self, filename, data):
        return self._filter(self._decodefilterpats, filename, data)
890 893
891 894 def currenttransaction(self):
892 895 """return the current transaction or None if non exists"""
893 896 tr = self._transref and self._transref() or None
894 897 if tr and tr.running():
895 898 return tr
896 899 return None
897 900
    def transaction(self, desc, report=None):
        """Open (or nest into) a store transaction described by *desc*.

        *report* overrides the function used to print rollback messages;
        it defaults to ui.warn.  Raises RepoError if an abandoned journal
        is found.  Returns the transaction object (held by weakref only).
        """
        tr = self.currenttransaction()
        if tr is not None:
            return tr.nest()

        # abort here if the journal already exists
        if self.svfs.exists("journal"):
            raise error.RepoError(
                _("abandoned transaction found"),
                hint=_("run 'hg recover' to clean up transaction"))

        self._writejournal(desc)
        renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
        rp = report and report or self.ui.warn
        vfsmap = {'plain': self.vfs} # root of .hg/
        tr = transaction.transaction(rp, self.svfs, vfsmap,
                                     "journal",
                                     "undo",
                                     aftertrans(renames),
                                     self.store.createmode)
        # note: writing the fncache only during finalize mean that the file is
        # outdated when running hooks. As fncache is used for streaming clone,
        # this is not expected to break anything that happen during the hooks.
        tr.addfinalize('flush-fncache', self.store.write)
        self._transref = weakref.ref(tr)
        return tr
924 927
    def _journalfiles(self):
        # (vfs, name) pairs of every file that snapshots pre-transaction
        # state; undofiles()/aftertrans derive their names from these
        return ((self.svfs, 'journal'),
                (self.vfs, 'journal.dirstate'),
                (self.vfs, 'journal.branch'),
                (self.vfs, 'journal.desc'),
                (self.vfs, 'journal.bookmarks'),
                (self.svfs, 'journal.phaseroots'))
932 935
    def undofiles(self):
        # the journal files under their post-transaction 'undo' names
        return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
935 938
    def _writejournal(self, desc):
        # snapshot everything a rollback must restore: dirstate, branch,
        # repo length plus *desc*, bookmarks, and phase roots
        self.vfs.write("journal.dirstate",
                       self.vfs.tryread("dirstate"))
        self.vfs.write("journal.branch",
                       encoding.fromlocal(self.dirstate.branch()))
        self.vfs.write("journal.desc",
                       "%d\n%s\n" % (len(self), desc))
        self.vfs.write("journal.bookmarks",
                       self.vfs.tryread("bookmarks"))
        self.svfs.write("journal.phaseroots",
                        self.svfs.tryread("phaseroots"))
947 950
    def recover(self):
        """Roll back an interrupted transaction, if one left a journal.

        Returns True when a journal was rolled back, False otherwise."""
        lock = self.lock()
        try:
            if self.svfs.exists("journal"):
                self.ui.status(_("rolling back interrupted transaction\n"))
                vfsmap = {'': self.svfs,
                          'plain': self.vfs,}
                transaction.rollback(self.svfs, vfsmap, "journal",
                                     self.ui.warn)
                # files were rewritten behind the caches' back
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
964 967
    def rollback(self, dryrun=False, force=False):
        """Undo the last transaction if 'undo' information exists.

        Returns 0 on success, 1 when there is nothing to roll back."""
        wlock = lock = None
        try:
            # both locks: rollback touches store and working-dir state
            wlock = self.wlock()
            lock = self.lock()
            if self.svfs.exists("undo"):
                return self._rollback(dryrun, force)
            else:
                self.ui.warn(_("no rollback information available\n"))
                return 1
        finally:
            release(lock, wlock)
977 980
    @unfilteredmethod # Until we get smarter cache management
    def _rollback(self, dryrun, force):
        # Implementation of rollback(): restore the undo.* snapshots written
        # by the previous transaction.  Returns 0 (also for dry runs).
        ui = self.ui
        try:
            # undo.desc records "<old repo length>\n<desc>[\n<detail>]"
            args = self.vfs.read('undo.desc').splitlines()
            (oldlen, desc, detail) = (int(args[0]), args[1], None)
            if len(args) >= 3:
                detail = args[2]
            oldtip = oldlen - 1

            if detail and ui.verbose:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s: %s)\n')
                       % (oldtip, desc, detail))
            else:
                msg = (_('repository tip rolled back to revision %s'
                         ' (undo %s)\n')
                       % (oldtip, desc))
        except IOError:
            msg = _('rolling back unknown transaction\n')
            desc = None

        if not force and self['.'] != self['tip'] and desc == 'commit':
            raise util.Abort(
                _('rollback of last commit while not checked out '
                  'may lose data'), hint=_('use -f to force'))

        ui.status(msg)
        if dryrun:
            return 0

        parents = self.dirstate.parents()
        self.destroying()
        vfsmap = {'plain': self.vfs, '': self.svfs}
        transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
        if self.vfs.exists('undo.bookmarks'):
            self.vfs.rename('undo.bookmarks', 'bookmarks')
        if self.svfs.exists('undo.phaseroots'):
            self.svfs.rename('undo.phaseroots', 'phaseroots')
        self.invalidate()

        # restore the dirstate only if the rolled-back transaction removed
        # the working directory's parents from history
        parentgone = (parents[0] not in self.changelog.nodemap or
                      parents[1] not in self.changelog.nodemap)
        if parentgone:
            self.vfs.rename('undo.dirstate', 'dirstate')
            try:
                branch = self.vfs.read('undo.branch')
                self.dirstate.setbranch(encoding.tolocal(branch))
            except IOError:
                ui.warn(_('named branch could not be reset: '
                          'current branch is still \'%s\'\n')
                        % self.dirstate.branch())

        self.dirstate.invalidate()
        parents = tuple([p.rev() for p in self.parents()])
        if len(parents) > 1:
            ui.status(_('working directory now based on '
                        'revisions %d and %d\n') % parents)
        else:
            ui.status(_('working directory now based on '
                        'revision %d\n') % parents)
        # TODO: if we know which new heads may result from this rollback, pass
        # them to destroy(), which will prevent the branchhead cache from being
        # invalidated.
        self.destroyed()
        return 0
1044 1047
    def invalidatecaches(self):
        # drop history-derived in-memory caches: tags, branch caches and
        # the volatile (filtered-revision/obsolescence) sets

        if '_tagscache' in vars(self):
            # can't use delattr on proxy
            del self.__dict__['_tagscache']

        self.unfiltered()._branchcaches.clear()
        self.invalidatevolatilesets()
1053 1056
    def invalidatevolatilesets(self):
        # these caches depend on obsolescence/visibility and can change
        # without any on-disk store write; drop them unconditionally
        self.filteredrevcache.clear()
        obsolete.clearobscaches(self)
1057 1060
    def invalidatedirstate(self):
        '''Invalidates the dirstate, causing the next call to dirstate
        to check if it was modified since the last time it was read,
        rereading it if it has.

        This is different from dirstate.invalidate() in that it doesn't
        always reread the dirstate. Use dirstate.invalidate() if you want to
        explicitly read the dirstate again (i.e. restoring it to a previous
        known good state).'''
        if hasunfilteredcache(self, 'dirstate'):
            # clear the dirstate's own filecache entries before dropping it
            for k in self.dirstate._filecache:
                try:
                    delattr(self.dirstate, k)
                except AttributeError:
                    pass
            delattr(self.unfiltered(), 'dirstate')
1074 1077
    def invalidate(self):
        """Drop cached store data so the next access rereads from disk."""
        unfiltered = self.unfiltered() # all file caches are stored unfiltered
        for k in self._filecache:
            # dirstate is invalidated separately in invalidatedirstate()
            if k == 'dirstate':
                continue

            try:
                delattr(unfiltered, k)
            except AttributeError:
                pass
        self.invalidatecaches()
        self.store.invalidatecaches()
1088 1091
    def invalidateall(self):
        '''Fully invalidates both store and non-store parts, causing the
        subsequent operation to reread any outside changes.'''
        # extension should hook this to invalidate its caches
        # both calls below only drop in-memory caches
        self.invalidate()
        self.invalidatedirstate()
1095 1098
    def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname* through *vfs*.

        With *wait* true, a held lock triggers a warning and a retry with
        the configured ui.timeout (default 600s); otherwise LockHeld is
        re-raised.  *releasefn* is passed to the lock for release time and
        *acquirefn*, if given, runs right after acquisition."""
        try:
            # first attempt never blocks (timeout 0)
            l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lockmod.lock(vfs, lockname,
                             int(self.ui.config("ui", "timeout", "600")),
                             releasefn, desc=desc)
            self.ui.warn(_("got lock after %s seconds\n") % l.delay)
        if acquirefn:
            acquirefn()
        return l
1112 1115
1113 1116 def _afterlock(self, callback):
1114 1117 """add a callback to the current repository lock.
1115 1118
1116 1119 The callback will be executed on lock release."""
1117 1120 l = self._lockref and self._lockref()
1118 1121 if l:
1119 1122 l.postrelease.append(callback)
1120 1123 else:
1121 1124 callback()
1122 1125
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            # re-entrant: bump the count on the lock we already hold
            l.lock()
            return l

        def unlock():
            # refresh filecache entries on release so state written while
            # locked is not later mistaken for stale
            for k, ce in self._filecache.items():
                if k == 'dirstate' or k not in self.__dict__:
                    continue
                ce.refresh()

        l = self._lock(self.svfs, "lock", wait, unlock,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
1142 1145
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            # re-entrant: bump the count on the lock we already hold
            l.lock()
            return l

        def unlock():
            # flush (or discard, mid parent-change) the dirstate on release
            if self.dirstate.pendingparentchange():
                self.dirstate.invalidate()
            else:
                self.dirstate.write()

            self._filecache['dirstate'].refresh()

        l = self._lock(self.vfs, "wlock", wait, unlock,
                       self.invalidatedirstate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
1165 1168
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        Returns the node for the file's new filelog revision, or the first
        parent's node when nothing needs to be written.  Appends the file
        name to *changelist* when a change (content, merge or flags) is
        recorded.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                 should record that bar descends from
            #                 bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # Here, we used to search backwards through history to try to find
            # where the file copy came from if the source of a copy was not in
            # the parent diretory. However, this doesn't actually make sense to
            # do (what does a copy from something not in your working copy even
            # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
            # the user that copy information was dropped, so if they didn't
            # expect this outcome it can be fixed, but this is the correct
            # behavior in this circumstance.

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent1 == nullid:
            fparent1, fparent2 = fparent2, nullid
        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
            if fparent1 in fparentancestors:
                fparent1, fparent2 = fparent2, nullid
            elif fparent2 in fparentancestors:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
        # are just the flags changed during merge?
        elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
1246 1249
1247 1250 @unfilteredmethod
1248 1251 def commit(self, text="", user=None, date=None, match=None, force=False,
1249 1252 editor=False, extra={}):
1250 1253 """Add a new revision to current repository.
1251 1254
1252 1255 Revision information is gathered from the working directory,
1253 1256 match can be used to filter the committed files. If editor is
1254 1257 supplied, it is called to get a commit message.
1255 1258 """
1256 1259
1257 1260 def fail(f, msg):
1258 1261 raise util.Abort('%s: %s' % (f, msg))
1259 1262
1260 1263 if not match:
1261 1264 match = matchmod.always(self.root, '')
1262 1265
1263 1266 if not force:
1264 1267 vdirs = []
1265 1268 match.explicitdir = vdirs.append
1266 1269 match.bad = fail
1267 1270
1268 1271 wlock = self.wlock()
1269 1272 try:
1270 1273 wctx = self[None]
1271 1274 merge = len(wctx.parents()) > 1
1272 1275
1273 1276 if (not force and merge and match and
1274 1277 (match.files() or match.anypats())):
1275 1278 raise util.Abort(_('cannot partially commit a merge '
1276 1279 '(do not specify files or patterns)'))
1277 1280
1278 1281 status = self.status(match=match, clean=force)
1279 1282 if force:
1280 1283 status.modified.extend(status.clean) # mq may commit clean files
1281 1284
1282 1285 # check subrepos
1283 1286 subs = []
1284 1287 commitsubs = set()
1285 1288 newstate = wctx.substate.copy()
1286 1289 # only manage subrepos and .hgsubstate if .hgsub is present
1287 1290 if '.hgsub' in wctx:
1288 1291 # we'll decide whether to track this ourselves, thanks
1289 1292 for c in status.modified, status.added, status.removed:
1290 1293 if '.hgsubstate' in c:
1291 1294 c.remove('.hgsubstate')
1292 1295
1293 1296 # compare current state to last committed state
1294 1297 # build new substate based on last committed state
1295 1298 oldstate = wctx.p1().substate
1296 1299 for s in sorted(newstate.keys()):
1297 1300 if not match(s):
1298 1301 # ignore working copy, use old state if present
1299 1302 if s in oldstate:
1300 1303 newstate[s] = oldstate[s]
1301 1304 continue
1302 1305 if not force:
1303 1306 raise util.Abort(
1304 1307 _("commit with new subrepo %s excluded") % s)
1305 1308 if wctx.sub(s).dirty(True):
1306 1309 if not self.ui.configbool('ui', 'commitsubrepos'):
1307 1310 raise util.Abort(
1308 1311 _("uncommitted changes in subrepo %s") % s,
1309 1312 hint=_("use --subrepos for recursive commit"))
1310 1313 subs.append(s)
1311 1314 commitsubs.add(s)
1312 1315 else:
1313 1316 bs = wctx.sub(s).basestate()
1314 1317 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1315 1318 if oldstate.get(s, (None, None, None))[1] != bs:
1316 1319 subs.append(s)
1317 1320
1318 1321 # check for removed subrepos
1319 1322 for p in wctx.parents():
1320 1323 r = [s for s in p.substate if s not in newstate]
1321 1324 subs += [s for s in r if match(s)]
1322 1325 if subs:
1323 1326 if (not match('.hgsub') and
1324 1327 '.hgsub' in (wctx.modified() + wctx.added())):
1325 1328 raise util.Abort(
1326 1329 _("can't commit subrepos without .hgsub"))
1327 1330 status.modified.insert(0, '.hgsubstate')
1328 1331
1329 1332 elif '.hgsub' in status.removed:
1330 1333 # clean up .hgsubstate when .hgsub is removed
1331 1334 if ('.hgsubstate' in wctx and
1332 1335 '.hgsubstate' not in (status.modified + status.added +
1333 1336 status.removed)):
1334 1337 status.removed.insert(0, '.hgsubstate')
1335 1338
1336 1339 # make sure all explicit patterns are matched
1337 1340 if not force and match.files():
1338 1341 matched = set(status.modified + status.added + status.removed)
1339 1342
1340 1343 for f in match.files():
1341 1344 f = self.dirstate.normalize(f)
1342 1345 if f == '.' or f in matched or f in wctx.substate:
1343 1346 continue
1344 1347 if f in status.deleted:
1345 1348 fail(f, _('file not found!'))
1346 1349 if f in vdirs: # visited directory
1347 1350 d = f + '/'
1348 1351 for mf in matched:
1349 1352 if mf.startswith(d):
1350 1353 break
1351 1354 else:
1352 1355 fail(f, _("no match under directory!"))
1353 1356 elif f not in self.dirstate:
1354 1357 fail(f, _("file not tracked!"))
1355 1358
1356 1359 cctx = context.workingcommitctx(self, status,
1357 1360 text, user, date, extra)
1358 1361
1359 1362 if (not force and not extra.get("close") and not merge
1360 1363 and not cctx.files()
1361 1364 and wctx.branch() == wctx.p1().branch()):
1362 1365 return None
1363 1366
1364 1367 if merge and cctx.deleted():
1365 1368 raise util.Abort(_("cannot commit merge with missing files"))
1366 1369
1367 1370 ms = mergemod.mergestate(self)
1368 1371 for f in status.modified:
1369 1372 if f in ms and ms[f] == 'u':
1370 1373 raise util.Abort(_('unresolved merge conflicts '
1371 1374 '(see "hg help resolve")'))
1372 1375
1373 1376 if editor:
1374 1377 cctx._text = editor(self, cctx, subs)
1375 1378 edited = (text != cctx._text)
1376 1379
1377 1380 # Save commit message in case this transaction gets rolled back
1378 1381 # (e.g. by a pretxncommit hook). Leave the content alone on
1379 1382 # the assumption that the user will use the same editor again.
1380 1383 msgfn = self.savecommitmessage(cctx._text)
1381 1384
1382 1385 # commit subs and write new state
1383 1386 if subs:
1384 1387 for s in sorted(commitsubs):
1385 1388 sub = wctx.sub(s)
1386 1389 self.ui.status(_('committing subrepository %s\n') %
1387 1390 subrepo.subrelpath(sub))
1388 1391 sr = sub.commit(cctx._text, user, date)
1389 1392 newstate[s] = (newstate[s][0], sr)
1390 1393 subrepo.writestate(self, newstate)
1391 1394
1392 1395 p1, p2 = self.dirstate.parents()
1393 1396 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1394 1397 try:
1395 1398 self.hook("precommit", throw=True, parent1=hookp1,
1396 1399 parent2=hookp2)
1397 1400 ret = self.commitctx(cctx, True)
1398 1401 except: # re-raises
1399 1402 if edited:
1400 1403 self.ui.write(
1401 1404 _('note: commit message saved in %s\n') % msgfn)
1402 1405 raise
1403 1406
1404 1407 # update bookmarks, dirstate and mergestate
1405 1408 bookmarks.update(self, [p1, p2], ret)
1406 1409 cctx.markcommitted(ret)
1407 1410 ms.reset()
1408 1411 finally:
1409 1412 wlock.release()
1410 1413
1411 1414 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1412 1415 # hack for command that use a temporary commit (eg: histedit)
1413 1416 # temporary commit got stripped before hook release
1414 1417 if node in self:
1415 1418 self.hook("commit", node=node, parent1=parent1,
1416 1419 parent2=parent2)
1417 1420 self._afterlock(commithook)
1418 1421 return ret
1419 1422
    @unfilteredmethod
    def commitctx(self, ctx, error=False):
        """Add a new revision to current repository.
        Revision information is passed via the context argument.

        With *error* true, IOErrors from individual files are fatal even
        when the file simply does not exist.  Returns the new changeset
        node."""

        tr = None
        p1, p2 = ctx.p1(), ctx.p2()
        user = ctx.user()

        lock = self.lock()
        try:
            tr = self.transaction("commit")
            trp = weakref.proxy(tr)

            if ctx.files():
                m1 = p1.manifest()
                m2 = p2.manifest()
                m = m1.copy()

                # check in files
                added = []
                changed = []
                removed = list(ctx.removed())
                linkrev = len(self)
                self.ui.note(_("committing files:\n"))
                for f in sorted(ctx.modified() + ctx.added()):
                    self.ui.note(f + "\n")
                    try:
                        fctx = ctx[f]
                        if fctx is None:
                            removed.append(f)
                        else:
                            added.append(f)
                            m[f] = self._filecommit(fctx, m1, m2, linkrev,
                                                    trp, changed)
                            m.setflag(f, fctx.flags())
                    except OSError, inst:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    except IOError, inst:
                        errcode = getattr(inst, 'errno', errno.ENOENT)
                        if error or errcode and errcode != errno.ENOENT:
                            self.ui.warn(_("trouble committing %s!\n") % f)
                            raise

                # update manifest
                self.ui.note(_("committing manifest\n"))
                removed = [f for f in sorted(removed) if f in m1 or f in m2]
                drop = [f for f in removed if f in m]
                for f in drop:
                    del m[f]
                mn = self.manifest.add(m, trp, linkrev,
                                       p1.manifestnode(), p2.manifestnode(),
                                       added, drop)
                files = changed + removed
            else:
                # no file changes: reuse the first parent's manifest
                mn = p1.manifestnode()
                files = []

            # update changelog
            self.ui.note(_("committing changelog\n"))
            self.changelog.delayupdate(tr)
            n = self.changelog.add(mn, files, ctx.description(),
                                   trp, p1.node(), p2.node(),
                                   user, ctx.date(), ctx.extra().copy())
            p = lambda: tr.writepending() and self.root or ""
            xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
            self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
                      parent2=xp2, pending=p)
            # set the new commit is proper phase
            targetphase = subrepo.newcommitphase(self.ui, ctx)
            if targetphase:
                # retract boundary do not alter parent changeset.
                # if a parent have higher the resulting phase will
                # be compliant anyway
                #
                # if minimal phase was 0 we don't need to retract anything
                phases.retractboundary(self, tr, targetphase, [n])
            tr.close()
            branchmap.updatecache(self.filtered('served'))
            return n
        finally:
            if tr:
                tr.release()
            lock.release()
1506 1509
    @unfilteredmethod
    def destroying(self):
        '''Inform the repository that nodes are about to be destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done before destroying history.

        This is mostly useful for saving state that is in memory and waiting
        to be flushed when the current lock is released. Because a call to
        destroyed is imminent, the repo will be invalidated causing those
        changes to stay in memory (waiting for the next unlock), or vanish
        completely.
        '''
        # When using the same lock to commit and strip, the phasecache is left
        # dirty after committing. Then when we strip, the repo is invalidated,
        # causing those changes to disappear.
        if '_phasecache' in vars(self):
            # flush now so pending phase movement survives the invalidation
            self._phasecache.write()
1524 1527
    @unfilteredmethod
    def destroyed(self):
        '''Inform the repository that nodes have been destroyed.
        Intended for use by strip and rollback, so there's a common
        place for anything that has to be done after destroying history.
        '''
        # When one tries to:
        # 1) destroy nodes thus calling this method (e.g. strip)
        # 2) use phasecache somewhere (e.g. commit)
        #
        # then 2) will fail because the phasecache contains nodes that were
        # removed. We can either remove phasecache from the filecache,
        # causing it to reload next time it is accessed, or simply filter
        # the removed nodes now and write the updated cache.
        self._phasecache.filterunknown(self)
        self._phasecache.write()

        # update the 'served' branch cache to help read only server process
        # Thanks to branchcache collaboration this is done from the nearest
        # filtered subset and it is expected to be fast.
        branchmap.updatecache(self.filtered('served'))

        # Ensure the persistent tag cache is updated. Doing it now
        # means that the tag cache only has to worry about destroyed
        # heads immediately after a strip/rollback. That in turn
        # guarantees that "cachetip == currenttip" (comparing both rev
        # and node) always means no nodes have been added or destroyed.

        # XXX this is suboptimal when qrefresh'ing: we strip the current
        # head, refresh the tag cache, then immediately add a new head.
        # But I think doing it this way is necessary for the "instant
        # tag cache retrieval" case to work.
        self.invalidate()
1558 1561
1559 1562 def walk(self, match, node=None):
1560 1563 '''
1561 1564 walk recursively through the directory tree or a given
1562 1565 changeset, finding all files matched by the match
1563 1566 function
1564 1567 '''
1565 1568 return self[node].walk(match)
1566 1569
1567 1570 def status(self, node1='.', node2=None, match=None,
1568 1571 ignored=False, clean=False, unknown=False,
1569 1572 listsubrepos=False):
1570 1573 '''a convenience method that calls node1.status(node2)'''
1571 1574 return self[node1].status(node2, match, ignored, clean, unknown,
1572 1575 listsubrepos)
1573 1576
1574 1577 def heads(self, start=None):
1575 1578 heads = self.changelog.heads(start)
1576 1579 # sort the output in rev descending order
1577 1580 return sorted(heads, key=self.changelog.rev, reverse=True)
1578 1581
1579 1582 def branchheads(self, branch=None, start=None, closed=False):
1580 1583 '''return a (possibly filtered) list of heads for the given branch
1581 1584
1582 1585 Heads are returned in topological order, from newest to oldest.
1583 1586 If branch is None, use the dirstate branch.
1584 1587 If start is not None, return only heads reachable from start.
1585 1588 If closed is True, return heads that are marked as closed as well.
1586 1589 '''
1587 1590 if branch is None:
1588 1591 branch = self[None].branch()
1589 1592 branches = self.branchmap()
1590 1593 if branch not in branches:
1591 1594 return []
1592 1595 # the cache returns heads ordered lowest to highest
1593 1596 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1594 1597 if start is not None:
1595 1598 # filter out the heads that cannot be reached from startrev
1596 1599 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1597 1600 bheads = [h for h in bheads if h in fbheads]
1598 1601 return bheads
1599 1602
1600 1603 def branches(self, nodes):
1601 1604 if not nodes:
1602 1605 nodes = [self.changelog.tip()]
1603 1606 b = []
1604 1607 for n in nodes:
1605 1608 t = n
1606 1609 while True:
1607 1610 p = self.changelog.parents(n)
1608 1611 if p[1] != nullid or p[0] == nullid:
1609 1612 b.append((t, n, p[0], p[1]))
1610 1613 break
1611 1614 n = p[0]
1612 1615 return b
1613 1616
1614 1617 def between(self, pairs):
1615 1618 r = []
1616 1619
1617 1620 for top, bottom in pairs:
1618 1621 n, l, i = top, [], 0
1619 1622 f = 1
1620 1623
1621 1624 while n != bottom and n != nullid:
1622 1625 p = self.changelog.parents(n)[0]
1623 1626 if i == f:
1624 1627 l.append(n)
1625 1628 f = f * 2
1626 1629 n = p
1627 1630 i += 1
1628 1631
1629 1632 r.append(l)
1630 1633
1631 1634 return r
1632 1635
    def checkpush(self, pushop):
        """Extensions can override this function if additional checks have
        to be performed before pushing, or call it if they override push
        command.
        """
        # default implementation: no extra checks
        pass
1639 1642
    @unfilteredpropertycache
    def prepushoutgoinghooks(self):
        """Return util.hooks consists of "(repo, remote, outgoing)"
        functions, which are called before pushing changesets.
        """
        # created lazily and cached on the unfiltered repo
        return util.hooks()
1646 1649
1647 1650 def stream_in(self, remote, requirements):
1648 1651 lock = self.lock()
1649 1652 try:
1650 1653 # Save remote branchmap. We will use it later
1651 1654 # to speed up branchcache creation
1652 1655 rbranchmap = None
1653 1656 if remote.capable("branchmap"):
1654 1657 rbranchmap = remote.branchmap()
1655 1658
1656 1659 fp = remote.stream_out()
1657 1660 l = fp.readline()
1658 1661 try:
1659 1662 resp = int(l)
1660 1663 except ValueError:
1661 1664 raise error.ResponseError(
1662 1665 _('unexpected response from remote server:'), l)
1663 1666 if resp == 1:
1664 1667 raise util.Abort(_('operation forbidden by server'))
1665 1668 elif resp == 2:
1666 1669 raise util.Abort(_('locking the remote repository failed'))
1667 1670 elif resp != 0:
1668 1671 raise util.Abort(_('the server sent an unknown error code'))
1669 1672 self.ui.status(_('streaming all changes\n'))
1670 1673 l = fp.readline()
1671 1674 try:
1672 1675 total_files, total_bytes = map(int, l.split(' ', 1))
1673 1676 except (ValueError, TypeError):
1674 1677 raise error.ResponseError(
1675 1678 _('unexpected response from remote server:'), l)
1676 1679 self.ui.status(_('%d files to transfer, %s of data\n') %
1677 1680 (total_files, util.bytecount(total_bytes)))
1678 1681 handled_bytes = 0
1679 1682 self.ui.progress(_('clone'), 0, total=total_bytes)
1680 1683 start = time.time()
1681 1684
1682 1685 tr = self.transaction(_('clone'))
1683 1686 try:
1684 1687 for i in xrange(total_files):
1685 1688 # XXX doesn't support '\n' or '\r' in filenames
1686 1689 l = fp.readline()
1687 1690 try:
1688 1691 name, size = l.split('\0', 1)
1689 1692 size = int(size)
1690 1693 except (ValueError, TypeError):
1691 1694 raise error.ResponseError(
1692 1695 _('unexpected response from remote server:'), l)
1693 1696 if self.ui.debugflag:
1694 1697 self.ui.debug('adding %s (%s)\n' %
1695 1698 (name, util.bytecount(size)))
1696 1699 # for backwards compat, name was partially encoded
1697 1700 ofp = self.svfs(store.decodedir(name), 'w')
1698 1701 for chunk in util.filechunkiter(fp, limit=size):
1699 1702 handled_bytes += len(chunk)
1700 1703 self.ui.progress(_('clone'), handled_bytes,
1701 1704 total=total_bytes)
1702 1705 ofp.write(chunk)
1703 1706 ofp.close()
1704 1707 tr.close()
1705 1708 finally:
1706 1709 tr.release()
1707 1710
1708 1711 # Writing straight to files circumvented the inmemory caches
1709 1712 self.invalidate()
1710 1713
1711 1714 elapsed = time.time() - start
1712 1715 if elapsed <= 0:
1713 1716 elapsed = 0.001
1714 1717 self.ui.progress(_('clone'), None)
1715 1718 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1716 1719 (util.bytecount(total_bytes), elapsed,
1717 1720 util.bytecount(total_bytes / elapsed)))
1718 1721
1719 1722 # new requirements = old non-format requirements +
1720 1723 # new format-related
1721 1724 # requirements from the streamed-in repository
1722 1725 requirements.update(set(self.requirements) - self.supportedformats)
1723 1726 self._applyrequirements(requirements)
1724 1727 self._writerequirements()
1725 1728
1726 1729 if rbranchmap:
1727 1730 rbheads = []
1728 1731 closed = []
1729 1732 for bheads in rbranchmap.itervalues():
1730 1733 rbheads.extend(bheads)
1731 1734 for h in bheads:
1732 1735 r = self.changelog.rev(h)
1733 1736 b, c = self.changelog.branchinfo(r)
1734 1737 if c:
1735 1738 closed.append(h)
1736 1739
1737 1740 if rbheads:
1738 1741 rtiprev = max((int(self.changelog.rev(node))
1739 1742 for node in rbheads))
1740 1743 cache = branchmap.branchcache(rbranchmap,
1741 1744 self[rtiprev].node(),
1742 1745 rtiprev,
1743 1746 closednodes=closed)
1744 1747 # Try to stick it as low as possible
1745 1748 # filters above served are unlikely to be fetched from a clone
1746 1749 for candidate in ('base', 'immutable', 'served'):
1747 1750 rview = self.filtered(candidate)
1748 1751 if cache.validfor(rview):
1749 1752 self._branchcaches[candidate] = cache
1750 1753 cache.write(rview)
1751 1754 break
1752 1755 self.invalidate()
1753 1756 return len(self.heads()) + 1
1754 1757 finally:
1755 1758 lock.release()
1756 1759
1757 1760 def clone(self, remote, heads=[], stream=None):
1758 1761 '''clone remote repository.
1759 1762
1760 1763 keyword arguments:
1761 1764 heads: list of revs to clone (forces use of pull)
1762 1765 stream: use streaming clone if possible'''
1763 1766
1764 1767 # now, all clients that can request uncompressed clones can
1765 1768 # read repo formats supported by all servers that can serve
1766 1769 # them.
1767 1770
1768 1771 # if revlog format changes, client will have to check version
1769 1772 # and format flags on "stream" capability, and use
1770 1773 # uncompressed only if compatible.
1771 1774
1772 1775 if stream is None:
1773 1776 # if the server explicitly prefers to stream (for fast LANs)
1774 1777 stream = remote.capable('stream-preferred')
1775 1778
1776 1779 if stream and not heads:
1777 1780 # 'stream' means remote revlog format is revlogv1 only
1778 1781 if remote.capable('stream'):
1779 1782 self.stream_in(remote, set(('revlogv1',)))
1780 1783 else:
1781 1784 # otherwise, 'streamreqs' contains the remote revlog format
1782 1785 streamreqs = remote.capable('streamreqs')
1783 1786 if streamreqs:
1784 1787 streamreqs = set(streamreqs.split(','))
1785 1788 # if we support it, stream in and adjust our requirements
1786 1789 if not streamreqs - self.supportedformats:
1787 1790 self.stream_in(remote, streamreqs)
1788 1791
1789 1792 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1790 1793 try:
1791 1794 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1792 1795 ret = exchange.pull(self, remote, heads).cgresult
1793 1796 finally:
1794 1797 self.ui.restoreconfig(quiet)
1795 1798 return ret
1796 1799
1797 1800 def pushkey(self, namespace, key, old, new):
1798 1801 try:
1799 1802 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1800 1803 old=old, new=new)
1801 1804 except error.HookAbort, exc:
1802 1805 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1803 1806 if exc.hint:
1804 1807 self.ui.write_err(_("(%s)\n") % exc.hint)
1805 1808 return False
1806 1809 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1807 1810 ret = pushkey.push(self, namespace, key, old, new)
1808 1811 def runhook():
1809 1812 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1810 1813 ret=ret)
1811 1814 self._afterlock(runhook)
1812 1815 return ret
1813 1816
1814 1817 def listkeys(self, namespace):
1815 1818 self.hook('prelistkeys', throw=True, namespace=namespace)
1816 1819 self.ui.debug('listing keys for "%s"\n' % namespace)
1817 1820 values = pushkey.list(self, namespace)
1818 1821 self.hook('listkeys', namespace=namespace, values=values)
1819 1822 return values
1820 1823
1821 1824 def debugwireargs(self, one, two, three=None, four=None, five=None):
1822 1825 '''used to test argument passing over the wire'''
1823 1826 return "%s %s %s %s %s" % (one, two, three, four, five)
1824 1827
1825 1828 def savecommitmessage(self, text):
1826 1829 fp = self.vfs('last-message.txt', 'wb')
1827 1830 try:
1828 1831 fp.write(text)
1829 1832 finally:
1830 1833 fp.close()
1831 1834 return self.pathto(fp.name[len(self.root) + 1:])
1832 1835
1833 1836 # used to avoid circular references so destructors work
1834 1837 def aftertrans(files):
1835 1838 renamefiles = [tuple(t) for t in files]
1836 1839 def a():
1837 1840 for vfs, src, dest in renamefiles:
1838 1841 try:
1839 1842 vfs.rename(src, dest)
1840 1843 except OSError: # journal file does not yet exist
1841 1844 pass
1842 1845 return a
1843 1846
1844 1847 def undoname(fn):
1845 1848 base, name = os.path.split(fn)
1846 1849 assert name.startswith('journal')
1847 1850 return os.path.join(base, name.replace('journal', 'undo', 1))
1848 1851
1849 1852 def instance(ui, path, create):
1850 1853 return localrepository(ui, util.urllocalpath(path), create)
1851 1854
1852 1855 def islocal(path):
1853 1856 return True
@@ -1,295 +1,300 b''
1 1 # manifest.py - manifest revision class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from i18n import _
9 9 import mdiff, parsers, error, revlog, util
10 10 import array, struct
11 11
12 12 class manifestdict(dict):
13 13 def __init__(self, mapping=None, flags=None):
14 14 if mapping is None:
15 15 mapping = {}
16 16 if flags is None:
17 17 flags = {}
18 18 dict.__init__(self, mapping)
19 19 self._flags = flags
20 20 def __setitem__(self, k, v):
21 21 assert v is not None
22 22 dict.__setitem__(self, k, v)
23 23 def flags(self, f):
24 24 return self._flags.get(f, "")
25 25 def setflag(self, f, flags):
26 26 """Set the flags (symlink, executable) for path f."""
27 27 self._flags[f] = flags
28 28 def copy(self):
29 29 return manifestdict(self, dict.copy(self._flags))
30 30 def intersectfiles(self, files):
31 31 '''make a new manifestdict with the intersection of self with files
32 32
33 33 The algorithm assumes that files is much smaller than self.'''
34 34 ret = manifestdict()
35 35 for fn in files:
36 36 if fn in self:
37 37 ret[fn] = self[fn]
38 38 flags = self._flags.get(fn, None)
39 39 if flags:
40 40 ret._flags[fn] = flags
41 41 return ret
42 42
43 43 def matches(self, match):
44 44 '''generate a new manifest filtered by the match argument'''
45 45 if match.always():
46 46 return self.copy()
47 47
48 48 files = match.files()
49 49 if (match.matchfn == match.exact or
50 50 (not match.anypats() and util.all(fn in self for fn in files))):
51 51 return self.intersectfiles(files)
52 52
53 53 mf = self.copy()
54 54 for fn in mf.keys():
55 55 if not match(fn):
56 56 del mf[fn]
57 57 return mf
58 58
59 59 def diff(self, m2, clean=False):
60 60 '''Finds changes between the current manifest and m2.
61 61
62 62 Args:
63 63 m2: the manifest to which this manifest should be compared.
64 64 clean: if true, include files unchanged between these manifests
65 65 with a None value in the returned dictionary.
66 66
67 67 The result is returned as a dict with filename as key and
68 68 values of the form ((n1,fl1),(n2,fl2)), where n1/n2 is the
69 69 nodeid in the current/other manifest and fl1/fl2 is the flag
70 70 in the current/other manifest. Where the file does not exist,
71 71 the nodeid will be None and the flags will be the empty
72 72 string.
73 73 '''
74 74 diff = {}
75 75
76 76 for fn, n1 in self.iteritems():
77 77 fl1 = self._flags.get(fn, '')
78 78 n2 = m2.get(fn, None)
79 79 fl2 = m2._flags.get(fn, '')
80 80 if n2 is None:
81 81 fl2 = ''
82 82 if n1 != n2 or fl1 != fl2:
83 83 diff[fn] = ((n1, fl1), (n2, fl2))
84 84 elif clean:
85 85 diff[fn] = None
86 86
87 87 for fn, n2 in m2.iteritems():
88 88 if fn not in self:
89 89 fl2 = m2._flags.get(fn, '')
90 90 diff[fn] = ((None, ''), (n2, fl2))
91 91
92 92 return diff
93 93
94 94 def text(self):
95 95 """Get the full data of this manifest as a bytestring."""
96 96 fl = sorted(self)
97 97 _checkforbidden(fl)
98 98
99 99 hex, flags = revlog.hex, self.flags
100 100 # if this is changed to support newlines in filenames,
101 101 # be sure to check the templates/ dir again (especially *-raw.tmpl)
102 102 return ''.join("%s\0%s%s\n" % (f, hex(self[f]), flags(f)) for f in fl)
103 103
104 104 def fastdelta(self, base, changes):
105 105 """Given a base manifest text as an array.array and a list of changes
106 106 relative to that text, compute a delta that can be used by revlog.
107 107 """
108 108 delta = []
109 109 dstart = None
110 110 dend = None
111 111 dline = [""]
112 112 start = 0
113 113 # zero copy representation of base as a buffer
114 114 addbuf = util.buffer(base)
115 115
116 116 # start with a readonly loop that finds the offset of
117 117 # each line and creates the deltas
118 118 for f, todelete in changes:
119 119 # bs will either be the index of the item or the insert point
120 120 start, end = _msearch(addbuf, f, start)
121 121 if not todelete:
122 122 l = "%s\0%s%s\n" % (f, revlog.hex(self[f]), self.flags(f))
123 123 else:
124 124 if start == end:
125 125 # item we want to delete was not found, error out
126 126 raise AssertionError(
127 127 _("failed to remove %s from manifest") % f)
128 128 l = ""
129 129 if dstart is not None and dstart <= start and dend >= start:
130 130 if dend < end:
131 131 dend = end
132 132 if l:
133 133 dline.append(l)
134 134 else:
135 135 if dstart is not None:
136 136 delta.append([dstart, dend, "".join(dline)])
137 137 dstart = start
138 138 dend = end
139 139 dline = [l]
140 140
141 141 if dstart is not None:
142 142 delta.append([dstart, dend, "".join(dline)])
143 143 # apply the delta to the base, and get a delta for addrevision
144 144 deltatext, arraytext = _addlistdelta(base, delta)
145 145 return arraytext, deltatext
146 146
147 147 def _msearch(m, s, lo=0, hi=None):
148 148 '''return a tuple (start, end) that says where to find s within m.
149 149
150 150 If the string is found m[start:end] are the line containing
151 151 that string. If start == end the string was not found and
152 152 they indicate the proper sorted insertion point.
153 153
154 154 m should be a buffer or a string
155 155 s is a string'''
156 156 def advance(i, c):
157 157 while i < lenm and m[i] != c:
158 158 i += 1
159 159 return i
160 160 if not s:
161 161 return (lo, lo)
162 162 lenm = len(m)
163 163 if not hi:
164 164 hi = lenm
165 165 while lo < hi:
166 166 mid = (lo + hi) // 2
167 167 start = mid
168 168 while start > 0 and m[start - 1] != '\n':
169 169 start -= 1
170 170 end = advance(start, '\0')
171 171 if m[start:end] < s:
172 172 # we know that after the null there are 40 bytes of sha1
173 173 # this translates to the bisect lo = mid + 1
174 174 lo = advance(end + 40, '\n') + 1
175 175 else:
176 176 # this translates to the bisect hi = mid
177 177 hi = start
178 178 end = advance(lo, '\0')
179 179 found = m[lo:end]
180 180 if s == found:
181 181 # we know that after the null there are 40 bytes of sha1
182 182 end = advance(end + 40, '\n')
183 183 return (lo, end + 1)
184 184 else:
185 185 return (lo, lo)
186 186
187 187 def _checkforbidden(l):
188 188 """Check filenames for illegal characters."""
189 189 for f in l:
190 190 if '\n' in f or '\r' in f:
191 191 raise error.RevlogError(
192 192 _("'\\n' and '\\r' disallowed in filenames: %r") % f)
193 193
194 194
195 195 # apply the changes collected during the bisect loop to our addlist
196 196 # return a delta suitable for addrevision
197 197 def _addlistdelta(addlist, x):
198 198 # for large addlist arrays, building a new array is cheaper
199 199 # than repeatedly modifying the existing one
200 200 currentposition = 0
201 201 newaddlist = array.array('c')
202 202
203 203 for start, end, content in x:
204 204 newaddlist += addlist[currentposition:start]
205 205 if content:
206 206 newaddlist += array.array('c', content)
207 207
208 208 currentposition = end
209 209
210 210 newaddlist += addlist[currentposition:]
211 211
212 212 deltatext = "".join(struct.pack(">lll", start, end, len(content))
213 213 + content for start, end, content in x)
214 214 return deltatext, newaddlist
215 215
216 216 def _parse(lines):
217 217 mfdict = manifestdict()
218 218 parsers.parse_manifest(mfdict, mfdict._flags, lines)
219 219 return mfdict
220 220
221 221 class manifest(revlog.revlog):
222 222 def __init__(self, opener):
223 # we expect to deal with not more than four revs at a time,
224 # during a commit --amend
225 self._mancache = util.lrucachedict(4)
223 # During normal operations, we expect to deal with not more than four
224 # revs at a time (such as during commit --amend). When rebasing large
225 # stacks of commits, the number can go up, hence the config knob below.
226 cachesize = 4
227 opts = getattr(opener, 'options', None)
228 if opts is not None:
229 cachesize = opts.get('manifestcachesize', cachesize)
230 self._mancache = util.lrucachedict(cachesize)
226 231 revlog.revlog.__init__(self, opener, "00manifest.i")
227 232
228 233 def readdelta(self, node):
229 234 r = self.rev(node)
230 235 return _parse(mdiff.patchtext(self.revdiff(self.deltaparent(r), r)))
231 236
232 237 def readfast(self, node):
233 238 '''use the faster of readdelta or read'''
234 239 r = self.rev(node)
235 240 deltaparent = self.deltaparent(r)
236 241 if deltaparent != revlog.nullrev and deltaparent in self.parentrevs(r):
237 242 return self.readdelta(node)
238 243 return self.read(node)
239 244
240 245 def read(self, node):
241 246 if node == revlog.nullid:
242 247 return manifestdict() # don't upset local cache
243 248 if node in self._mancache:
244 249 return self._mancache[node][0]
245 250 text = self.revision(node)
246 251 arraytext = array.array('c', text)
247 252 mapping = _parse(text)
248 253 self._mancache[node] = (mapping, arraytext)
249 254 return mapping
250 255
251 256 def find(self, node, f):
252 257 '''look up entry for a single file efficiently.
253 258 return (node, flags) pair if found, (None, None) if not.'''
254 259 if node in self._mancache:
255 260 mapping = self._mancache[node][0]
256 261 return mapping.get(f), mapping.flags(f)
257 262 text = self.revision(node)
258 263 start, end = _msearch(text, f)
259 264 if start == end:
260 265 return None, None
261 266 l = text[start:end]
262 267 f, n = l.split('\0')
263 268 return revlog.bin(n[:40]), n[40:-1]
264 269
265 270 def add(self, map, transaction, link, p1, p2, added, removed):
266 271 if p1 in self._mancache:
267 272 # If our first parent is in the manifest cache, we can
268 273 # compute a delta here using properties we know about the
269 274 # manifest up-front, which may save time later for the
270 275 # revlog layer.
271 276
272 277 _checkforbidden(added)
273 278 # combine the changed lists into one list for sorting
274 279 work = [(x, False) for x in added]
275 280 work.extend((x, True) for x in removed)
276 281 # this could use heapq.merge() (from Python 2.6+) or equivalent
277 282 # since the lists are already sorted
278 283 work.sort()
279 284
280 285 arraytext, deltatext = map.fastdelta(self._mancache[p1][1], work)
281 286 cachedelta = self.rev(p1), deltatext
282 287 text = util.buffer(arraytext)
283 288 else:
284 289 # The first parent manifest isn't already loaded, so we'll
285 290 # just encode a fulltext of the manifest and pass that
286 291 # through to the revlog layer, and let it handle the delta
287 292 # process.
288 293 text = map.text()
289 294 arraytext = array.array('c', text)
290 295 cachedelta = None
291 296
292 297 n = self.addrevision(text, transaction, link, p1, p2, cachedelta)
293 298 self._mancache[n] = (map, arraytext)
294 299
295 300 return n
General Comments 0
You need to be logged in to leave comments. Login now