localrepo: rename revlog.maxchainlen to format.maxchainlen...
Augie Fackler
r23256:1c11393d default
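The only functional change in this diff is in _applyrequirements: the maxchainlen option is now read from the [format] section of hgrc rather than [revlog]. A minimal sketch of the new spelling (the value is illustrative, not a recommended default):

    [format]
    maxchainlen = 4096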
@@ -1,1806 +1,1806 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect
20 20 import branchmap, pathutil
21 21 propertycache = util.propertycache
22 22 filecache = scmutil.filecache
23 23
24 24 class repofilecache(filecache):
25 25 """All filecache usage on repo are done for logic that should be unfiltered
26 26 """
27 27
28 28 def __get__(self, repo, type=None):
29 29 return super(repofilecache, self).__get__(repo.unfiltered(), type)
30 30 def __set__(self, repo, value):
31 31 return super(repofilecache, self).__set__(repo.unfiltered(), value)
32 32 def __delete__(self, repo):
33 33 return super(repofilecache, self).__delete__(repo.unfiltered())
34 34
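# Sketch: repofilecache exists so that file-backed property caches are
# always attached to the unfiltered repository, letting every filtered
# view share a single cache entry. It is used as a decorator, e.g. later
# in this module:
#
#     @repofilecache('bookmarks')
#     def _bookmarks(self):
#         return bookmarks.bmstore(self)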
35 35 class storecache(repofilecache):
36 36 """filecache for files in the store"""
37 37 def join(self, obj, fname):
38 38 return obj.sjoin(fname)
39 39
40 40 class unfilteredpropertycache(propertycache):
41 41 """propertycache that apply to unfiltered repo only"""
42 42
43 43 def __get__(self, repo, type=None):
44 44 unfi = repo.unfiltered()
45 45 if unfi is repo:
46 46 return super(unfilteredpropertycache, self).__get__(unfi)
47 47 return getattr(unfi, self.name)
48 48
49 49 class filteredpropertycache(propertycache):
50 50 """propertycache that must take filtering in account"""
51 51
52 52 def cachevalue(self, obj, value):
53 53 object.__setattr__(obj, self.name, value)
54 54
55 55
56 56 def hasunfilteredcache(repo, name):
57 57 """check if a repo has an unfilteredpropertycache value for <name>"""
58 58 return name in vars(repo.unfiltered())
59 59
60 60 def unfilteredmethod(orig):
61 61 """decorate method that always need to be run on unfiltered version"""
62 62 def wrapper(repo, *args, **kwargs):
63 63 return orig(repo.unfiltered(), *args, **kwargs)
64 64 return wrapper
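# Sketch of the decorator above in use (it appears verbatim on commit(),
# commitctx() and destroyed() further down): the wrapped method always
# runs against the unfiltered repository, whatever view it was called on.
#
#     @unfilteredmethod
#     def destroyed(self):
#         ...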
65 65
66 66 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
67 67 'unbundle'))
68 68 legacycaps = moderncaps.union(set(['changegroupsubset']))
69 69
70 70 class localpeer(peer.peerrepository):
71 71 '''peer for a local repo; reflects only the most recent API'''
72 72
73 73 def __init__(self, repo, caps=moderncaps):
74 74 peer.peerrepository.__init__(self)
75 75 self._repo = repo.filtered('served')
76 76 self.ui = repo.ui
77 77 self._caps = repo._restrictcapabilities(caps)
78 78 self.requirements = repo.requirements
79 79 self.supportedformats = repo.supportedformats
80 80
81 81 def close(self):
82 82 self._repo.close()
83 83
84 84 def _capabilities(self):
85 85 return self._caps
86 86
87 87 def local(self):
88 88 return self._repo
89 89
90 90 def canpush(self):
91 91 return True
92 92
93 93 def url(self):
94 94 return self._repo.url()
95 95
96 96 def lookup(self, key):
97 97 return self._repo.lookup(key)
98 98
99 99 def branchmap(self):
100 100 return self._repo.branchmap()
101 101
102 102 def heads(self):
103 103 return self._repo.heads()
104 104
105 105 def known(self, nodes):
106 106 return self._repo.known(nodes)
107 107
108 108 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
109 109 format='HG10', **kwargs):
110 110 cg = exchange.getbundle(self._repo, source, heads=heads,
111 111 common=common, bundlecaps=bundlecaps, **kwargs)
112 112 if bundlecaps is not None and 'HG2Y' in bundlecaps:
113 113 # When requesting a bundle2, getbundle returns a stream to make the
114 114 # wire-level function happier. We need to build a proper object
115 115 # from it in the local peer.
116 116 cg = bundle2.unbundle20(self.ui, cg)
117 117 return cg
118 118
119 119 # TODO We might want to move the next two calls into legacypeer and add
120 120 # unbundle instead.
121 121
122 122 def unbundle(self, cg, heads, url):
123 123 """apply a bundle on a repo
124 124
125 125 This function handles the repo locking itself."""
126 126 try:
127 127 cg = exchange.readbundle(self.ui, cg, None)
128 128 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
129 129 if util.safehasattr(ret, 'getchunks'):
130 130 # This is a bundle20 object, turn it into an unbundler.
131 131 # This little dance should be dropped eventually when the API
132 132 # is finally improved.
133 133 stream = util.chunkbuffer(ret.getchunks())
134 134 ret = bundle2.unbundle20(self.ui, stream)
135 135 return ret
136 136 except error.PushRaced, exc:
137 137 raise error.ResponseError(_('push failed:'), str(exc))
138 138
139 139 def lock(self):
140 140 return self._repo.lock()
141 141
142 142 def addchangegroup(self, cg, source, url):
143 143 return changegroup.addchangegroup(self._repo, cg, source, url)
144 144
145 145 def pushkey(self, namespace, key, old, new):
146 146 return self._repo.pushkey(namespace, key, old, new)
147 147
148 148 def listkeys(self, namespace):
149 149 return self._repo.listkeys(namespace)
150 150
151 151 def debugwireargs(self, one, two, three=None, four=None, five=None):
152 152 '''used to test argument passing over the wire'''
153 153 return "%s %s %s %s %s" % (one, two, three, four, five)
154 154
155 155 class locallegacypeer(localpeer):
156 156 '''peer extension which implements legacy methods too; used for tests with
157 157 restricted capabilities'''
158 158
159 159 def __init__(self, repo):
160 160 localpeer.__init__(self, repo, caps=legacycaps)
161 161
162 162 def branches(self, nodes):
163 163 return self._repo.branches(nodes)
164 164
165 165 def between(self, pairs):
166 166 return self._repo.between(pairs)
167 167
168 168 def changegroup(self, basenodes, source):
169 169 return changegroup.changegroup(self._repo, basenodes, source)
170 170
171 171 def changegroupsubset(self, bases, heads, source):
172 172 return changegroup.changegroupsubset(self._repo, bases, heads, source)
173 173
174 174 class localrepository(object):
175 175
176 176 supportedformats = set(('revlogv1', 'generaldelta'))
177 177 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
178 178 'dotencode'))
179 179 openerreqs = set(('revlogv1', 'generaldelta'))
180 180 requirements = ['revlogv1']
181 181 filtername = None
182 182
183 183 # a list of (ui, featureset) functions.
184 184 # only functions defined in modules of enabled extensions are invoked
185 185 featuresetupfuncs = set()
186 186
187 187 def _baserequirements(self, create):
188 188 return self.requirements[:]
189 189
190 190 def __init__(self, baseui, path=None, create=False):
191 191 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
192 192 self.wopener = self.wvfs
193 193 self.root = self.wvfs.base
194 194 self.path = self.wvfs.join(".hg")
195 195 self.origroot = path
196 196 self.auditor = pathutil.pathauditor(self.root, self._checknested)
197 197 self.vfs = scmutil.vfs(self.path)
198 198 self.opener = self.vfs
199 199 self.baseui = baseui
200 200 self.ui = baseui.copy()
201 201 self.ui.copy = baseui.copy # prevent copying repo configuration
202 202 # A list of callbacks to shape the phase if no data were found.
203 203 # Callbacks are in the form: func(repo, roots) --> processed root.
204 204 # This list is to be filled by extensions during repo setup
205 205 self._phasedefaults = []
206 206 try:
207 207 self.ui.readconfig(self.join("hgrc"), self.root)
208 208 extensions.loadall(self.ui)
209 209 except IOError:
210 210 pass
211 211
212 212 if self.featuresetupfuncs:
213 213 self.supported = set(self._basesupported) # use private copy
214 214 extmods = set(m.__name__ for n, m
215 215 in extensions.extensions(self.ui))
216 216 for setupfunc in self.featuresetupfuncs:
217 217 if setupfunc.__module__ in extmods:
218 218 setupfunc(self.ui, self.supported)
219 219 else:
220 220 self.supported = self._basesupported
221 221
222 222 if not self.vfs.isdir():
223 223 if create:
224 224 if not self.wvfs.exists():
225 225 self.wvfs.makedirs()
226 226 self.vfs.makedir(notindexed=True)
227 227 requirements = self._baserequirements(create)
228 228 if self.ui.configbool('format', 'usestore', True):
229 229 self.vfs.mkdir("store")
230 230 requirements.append("store")
231 231 if self.ui.configbool('format', 'usefncache', True):
232 232 requirements.append("fncache")
233 233 if self.ui.configbool('format', 'dotencode', True):
234 234 requirements.append('dotencode')
235 235 # create an invalid changelog
236 236 self.vfs.append(
237 237 "00changelog.i",
238 238 '\0\0\0\2' # represents revlogv2
239 239 ' dummy changelog to prevent using the old repo layout'
240 240 )
241 241 if self.ui.configbool('format', 'generaldelta', False):
242 242 requirements.append("generaldelta")
243 243 requirements = set(requirements)
244 244 else:
245 245 raise error.RepoError(_("repository %s not found") % path)
246 246 elif create:
247 247 raise error.RepoError(_("repository %s already exists") % path)
248 248 else:
249 249 try:
250 250 requirements = scmutil.readrequires(self.vfs, self.supported)
251 251 except IOError, inst:
252 252 if inst.errno != errno.ENOENT:
253 253 raise
254 254 requirements = set()
255 255
256 256 self.sharedpath = self.path
257 257 try:
258 258 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
259 259 realpath=True)
260 260 s = vfs.base
261 261 if not vfs.exists():
262 262 raise error.RepoError(
263 263 _('.hg/sharedpath points to nonexistent directory %s') % s)
264 264 self.sharedpath = s
265 265 except IOError, inst:
266 266 if inst.errno != errno.ENOENT:
267 267 raise
268 268
269 269 self.store = store.store(requirements, self.sharedpath, scmutil.vfs)
270 270 self.spath = self.store.path
271 271 self.svfs = self.store.vfs
272 272 self.sopener = self.svfs
273 273 self.sjoin = self.store.join
274 274 self.vfs.createmode = self.store.createmode
275 275 self._applyrequirements(requirements)
276 276 if create:
277 277 self._writerequirements()
278 278
279 279
280 280 self._branchcaches = {}
281 281 self.filterpats = {}
282 282 self._datafilters = {}
283 283 self._transref = self._lockref = self._wlockref = None
284 284
285 285 # A cache for various files under .hg/ that tracks file changes
286 286 # (used by the filecache decorator)
287 287 #
288 288 # Maps a property name to its util.filecacheentry
289 289 self._filecache = {}
290 290
291 291 # hold sets of revisions to be filtered
292 292 # should be cleared when something might have changed the filter value:
293 293 # - new changesets,
294 294 # - phase change,
295 295 # - new obsolescence marker,
296 296 # - working directory parent change,
297 297 # - bookmark changes
298 298 self.filteredrevcache = {}
299 299
300 300 def close(self):
301 301 pass
302 302
303 303 def _restrictcapabilities(self, caps):
304 304 # bundle2 is not ready for prime time, drop it unless explicitly
305 305 # required by the tests (or some brave tester)
306 306 if self.ui.configbool('experimental', 'bundle2-exp', False):
307 307 caps = set(caps)
308 308 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
309 309 caps.add('bundle2-exp=' + urllib.quote(capsblob))
310 310 return caps
311 311
312 312 def _applyrequirements(self, requirements):
313 313 self.requirements = requirements
314 314 self.sopener.options = dict((r, 1) for r in requirements
315 315 if r in self.openerreqs)
316 316 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
317 317 if chunkcachesize is not None:
318 318 self.sopener.options['chunkcachesize'] = chunkcachesize
319 maxchainlen = self.ui.configint('revlog', 'maxchainlen')
319 maxchainlen = self.ui.configint('format', 'maxchainlen')
320 320 if maxchainlen is not None:
321 321 self.sopener.options['maxchainlen'] = maxchainlen
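# How the renamed option flows, as a sketch: ui.configint('format',
# 'maxchainlen') returns None when the option is unset, so nothing is
# added to sopener.options and revlogs keep their built-in default;
# when it is set, revlogs opened through this opener see
# options['maxchainlen'] and cap their delta chain length accordingly.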
322 322
323 323 def _writerequirements(self):
324 324 reqfile = self.opener("requires", "w")
325 325 for r in sorted(self.requirements):
326 326 reqfile.write("%s\n" % r)
327 327 reqfile.close()
328 328
329 329 def _checknested(self, path):
330 330 """Determine if path is a legal nested repository."""
331 331 if not path.startswith(self.root):
332 332 return False
333 333 subpath = path[len(self.root) + 1:]
334 334 normsubpath = util.pconvert(subpath)
335 335
336 336 # XXX: Checking against the current working copy is wrong in
337 337 # the sense that it can reject things like
338 338 #
339 339 # $ hg cat -r 10 sub/x.txt
340 340 #
341 341 # if sub/ is no longer a subrepository in the working copy
342 342 # parent revision.
343 343 #
344 344 # However, it can of course also allow things that would have
345 345 # been rejected before, such as the above cat command if sub/
346 346 # is a subrepository now, but was a normal directory before.
347 347 # The old path auditor would have rejected by mistake since it
348 348 # panics when it sees sub/.hg/.
349 349 #
350 350 # All in all, checking against the working copy seems sensible
351 351 # since we want to prevent access to nested repositories on
352 352 # the filesystem *now*.
353 353 ctx = self[None]
354 354 parts = util.splitpath(subpath)
355 355 while parts:
356 356 prefix = '/'.join(parts)
357 357 if prefix in ctx.substate:
358 358 if prefix == normsubpath:
359 359 return True
360 360 else:
361 361 sub = ctx.sub(prefix)
362 362 return sub.checknested(subpath[len(prefix) + 1:])
363 363 else:
364 364 parts.pop()
365 365 return False
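# Worked example (a sketch): for path == root + '/sub/dir/x.txt', subpath
# is 'sub/dir/x.txt' and the loop above probes the prefixes
# 'sub/dir/x.txt', 'sub/dir' and 'sub' against ctx.substate, longest
# first; the first prefix that names a subrepo either matches exactly or
# delegates the remainder to sub.checknested().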
366 366
367 367 def peer(self):
368 368 return localpeer(self) # not cached to avoid reference cycle
369 369
370 370 def unfiltered(self):
371 371 """Return unfiltered version of the repository
372 372
373 373 Intended to be overwritten by filtered repo."""
374 374 return self
375 375
376 376 def filtered(self, name):
377 377 """Return a filtered version of a repository"""
378 378 # build a new class with the mixin and the current class
379 379 # (possibly subclass of the repo)
380 380 class proxycls(repoview.repoview, self.unfiltered().__class__):
381 381 pass
382 382 return proxycls(self, name)
383 383
384 384 @repofilecache('bookmarks')
385 385 def _bookmarks(self):
386 386 return bookmarks.bmstore(self)
387 387
388 388 @repofilecache('bookmarks.current')
389 389 def _bookmarkcurrent(self):
390 390 return bookmarks.readcurrent(self)
391 391
392 392 def bookmarkheads(self, bookmark):
393 393 name = bookmark.split('@', 1)[0]
394 394 heads = []
395 395 for mark, n in self._bookmarks.iteritems():
396 396 if mark.split('@', 1)[0] == name:
397 397 heads.append(n)
398 398 return heads
399 399
400 400 @storecache('phaseroots')
401 401 def _phasecache(self):
402 402 return phases.phasecache(self, self._phasedefaults)
403 403
404 404 @storecache('obsstore')
405 405 def obsstore(self):
406 406 # read default format for new obsstore.
407 407 defaultformat = self.ui.configint('format', 'obsstore-version', None)
408 408 # rely on obsstore class default when possible.
409 409 kwargs = {}
410 410 if defaultformat is not None:
411 411 kwargs['defaultformat'] = defaultformat
412 412 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
413 413 store = obsolete.obsstore(self.sopener, readonly=readonly,
414 414 **kwargs)
415 415 if store and readonly:
416 416 # message is rare enough to not be translated
417 417 msg = 'obsolete feature not enabled but %i markers found!\n'
418 418 self.ui.warn(msg % len(list(store)))
419 419 return store
420 420
421 421 @storecache('00changelog.i')
422 422 def changelog(self):
423 423 c = changelog.changelog(self.sopener)
424 424 if 'HG_PENDING' in os.environ:
425 425 p = os.environ['HG_PENDING']
426 426 if p.startswith(self.root):
427 427 c.readpending('00changelog.i.a')
428 428 return c
429 429
430 430 @storecache('00manifest.i')
431 431 def manifest(self):
432 432 return manifest.manifest(self.sopener)
433 433
434 434 @repofilecache('dirstate')
435 435 def dirstate(self):
436 436 warned = [0]
437 437 def validate(node):
438 438 try:
439 439 self.changelog.rev(node)
440 440 return node
441 441 except error.LookupError:
442 442 if not warned[0]:
443 443 warned[0] = True
444 444 self.ui.warn(_("warning: ignoring unknown"
445 445 " working parent %s!\n") % short(node))
446 446 return nullid
447 447
448 448 return dirstate.dirstate(self.opener, self.ui, self.root, validate)
449 449
450 450 def __getitem__(self, changeid):
451 451 if changeid is None:
452 452 return context.workingctx(self)
453 453 return context.changectx(self, changeid)
454 454
455 455 def __contains__(self, changeid):
456 456 try:
457 457 return bool(self.lookup(changeid))
458 458 except error.RepoLookupError:
459 459 return False
460 460
461 461 def __nonzero__(self):
462 462 return True
463 463
464 464 def __len__(self):
465 465 return len(self.changelog)
466 466
467 467 def __iter__(self):
468 468 return iter(self.changelog)
469 469
470 470 def revs(self, expr, *args):
471 471 '''Return a list of revisions matching the given revset'''
472 472 expr = revset.formatspec(expr, *args)
473 473 m = revset.match(None, expr)
474 474 return m(self, revset.spanset(self))
475 475
476 476 def set(self, expr, *args):
477 477 '''
478 478 Yield a context for each matching revision, after doing arg
479 479 replacement via revset.formatspec
480 480 '''
481 481 for r in self.revs(expr, *args):
482 482 yield self[r]
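# Usage sketch for revs()/set(); argument substitution is done by
# revset.formatspec, and '%d' is one of its placeholders. The revset
# string and the revision number are illustrative only:
#
#     for ctx in repo.set('first(ancestors(%d), 3)', 42):
#         print ctx.hex()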
483 483
484 484 def url(self):
485 485 return 'file:' + self.root
486 486
487 487 def hook(self, name, throw=False, **args):
488 488 """Call a hook, passing this repo instance.
489 489
490 490 This a convenience method to aid invoking hooks. Extensions likely
491 491 won't call this unless they have registered a custom hook or are
492 492 replacing code that is expected to call a hook.
493 493 """
494 494 return hook.hook(self.ui, self, name, throw, **args)
495 495
496 496 @unfilteredmethod
497 497 def _tag(self, names, node, message, local, user, date, extra={},
498 498 editor=False):
499 499 if isinstance(names, str):
500 500 names = (names,)
501 501
502 502 branches = self.branchmap()
503 503 for name in names:
504 504 self.hook('pretag', throw=True, node=hex(node), tag=name,
505 505 local=local)
506 506 if name in branches:
507 507 self.ui.warn(_("warning: tag %s conflicts with existing"
508 508 " branch name\n") % name)
509 509
510 510 def writetags(fp, names, munge, prevtags):
511 511 fp.seek(0, 2)
512 512 if prevtags and prevtags[-1] != '\n':
513 513 fp.write('\n')
514 514 for name in names:
515 515 m = munge and munge(name) or name
516 516 if (self._tagscache.tagtypes and
517 517 name in self._tagscache.tagtypes):
518 518 old = self.tags().get(name, nullid)
519 519 fp.write('%s %s\n' % (hex(old), m))
520 520 fp.write('%s %s\n' % (hex(node), m))
521 521 fp.close()
522 522
523 523 prevtags = ''
524 524 if local:
525 525 try:
526 526 fp = self.opener('localtags', 'r+')
527 527 except IOError:
528 528 fp = self.opener('localtags', 'a')
529 529 else:
530 530 prevtags = fp.read()
531 531
532 532 # local tags are stored in the current charset
533 533 writetags(fp, names, None, prevtags)
534 534 for name in names:
535 535 self.hook('tag', node=hex(node), tag=name, local=local)
536 536 return
537 537
538 538 try:
539 539 fp = self.wfile('.hgtags', 'rb+')
540 540 except IOError, e:
541 541 if e.errno != errno.ENOENT:
542 542 raise
543 543 fp = self.wfile('.hgtags', 'ab')
544 544 else:
545 545 prevtags = fp.read()
546 546
547 547 # committed tags are stored in UTF-8
548 548 writetags(fp, names, encoding.fromlocal, prevtags)
549 549
550 550 fp.close()
551 551
552 552 self.invalidatecaches()
553 553
554 554 if '.hgtags' not in self.dirstate:
555 555 self[None].add(['.hgtags'])
556 556
557 557 m = matchmod.exact(self.root, '', ['.hgtags'])
558 558 tagnode = self.commit(message, user, date, extra=extra, match=m,
559 559 editor=editor)
560 560
561 561 for name in names:
562 562 self.hook('tag', node=hex(node), tag=name, local=local)
563 563
564 564 return tagnode
565 565
566 566 def tag(self, names, node, message, local, user, date, editor=False):
567 567 '''tag a revision with one or more symbolic names.
568 568
569 569 names is a list of strings or, when adding a single tag, names may be a
570 570 string.
571 571
572 572 if local is True, the tags are stored in a per-repository file.
573 573 otherwise, they are stored in the .hgtags file, and a new
574 574 changeset is committed with the change.
575 575
576 576 keyword arguments:
577 577
578 578 local: whether to store tags in non-version-controlled file
579 579 (default False)
580 580
581 581 message: commit message to use if committing
582 582
583 583 user: name of user to use if committing
584 584
585 585 date: date tuple to use if committing'''
586 586
587 587 if not local:
588 588 m = matchmod.exact(self.root, '', ['.hgtags'])
589 589 if util.any(self.status(match=m, unknown=True, ignored=True)):
590 590 raise util.Abort(_('working copy of .hgtags is changed'),
591 591 hint=_('please commit .hgtags manually'))
592 592
593 593 self.tags() # instantiate the cache
594 594 self._tag(names, node, message, local, user, date, editor=editor)
595 595
596 596 @filteredpropertycache
597 597 def _tagscache(self):
598 598 '''Returns a tagscache object that contains various tags related
599 599 caches.'''
600 600
601 601 # This simplifies its cache management by having one decorated
602 602 # function (this one) and the rest simply fetch things from it.
603 603 class tagscache(object):
604 604 def __init__(self):
605 605 # These two define the set of tags for this repository. tags
606 606 # maps tag name to node; tagtypes maps tag name to 'global' or
607 607 # 'local'. (Global tags are defined by .hgtags across all
608 608 # heads, and local tags are defined in .hg/localtags.)
609 609 # They constitute the in-memory cache of tags.
610 610 self.tags = self.tagtypes = None
611 611
612 612 self.nodetagscache = self.tagslist = None
613 613
614 614 cache = tagscache()
615 615 cache.tags, cache.tagtypes = self._findtags()
616 616
617 617 return cache
618 618
619 619 def tags(self):
620 620 '''return a mapping of tag to node'''
621 621 t = {}
622 622 if self.changelog.filteredrevs:
623 623 tags, tt = self._findtags()
624 624 else:
625 625 tags = self._tagscache.tags
626 626 for k, v in tags.iteritems():
627 627 try:
628 628 # ignore tags to unknown nodes
629 629 self.changelog.rev(v)
630 630 t[k] = v
631 631 except (error.LookupError, ValueError):
632 632 pass
633 633 return t
634 634
635 635 def _findtags(self):
636 636 '''Do the hard work of finding tags. Return a pair of dicts
637 637 (tags, tagtypes) where tags maps tag name to node, and tagtypes
638 638 maps tag name to a string like \'global\' or \'local\'.
639 639 Subclasses or extensions are free to add their own tags, but
640 640 should be aware that the returned dicts will be retained for the
641 641 duration of the localrepo object.'''
642 642
643 643 # XXX what tagtype should subclasses/extensions use? Currently
644 644 # mq and bookmarks add tags, but do not set the tagtype at all.
645 645 # Should each extension invent its own tag type? Should there
646 646 # be one tagtype for all such "virtual" tags? Or is the status
647 647 # quo fine?
648 648
649 649 alltags = {} # map tag name to (node, hist)
650 650 tagtypes = {}
651 651
652 652 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
653 653 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
654 654
655 655 # Build the return dicts. Have to re-encode tag names because
656 656 # the tags module always uses UTF-8 (in order not to lose info
657 657 # writing to the cache), but the rest of Mercurial wants them in
658 658 # local encoding.
659 659 tags = {}
660 660 for (name, (node, hist)) in alltags.iteritems():
661 661 if node != nullid:
662 662 tags[encoding.tolocal(name)] = node
663 663 tags['tip'] = self.changelog.tip()
664 664 tagtypes = dict([(encoding.tolocal(name), value)
665 665 for (name, value) in tagtypes.iteritems()])
666 666 return (tags, tagtypes)
667 667
668 668 def tagtype(self, tagname):
669 669 '''
670 670 return the type of the given tag. result can be:
671 671
672 672 'local' : a local tag
673 673 'global' : a global tag
674 674 None : tag does not exist
675 675 '''
676 676
677 677 return self._tagscache.tagtypes.get(tagname)
678 678
679 679 def tagslist(self):
680 680 '''return a list of tags ordered by revision'''
681 681 if not self._tagscache.tagslist:
682 682 l = []
683 683 for t, n in self.tags().iteritems():
684 684 l.append((self.changelog.rev(n), t, n))
685 685 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
686 686
687 687 return self._tagscache.tagslist
688 688
689 689 def nodetags(self, node):
690 690 '''return the tags associated with a node'''
691 691 if not self._tagscache.nodetagscache:
692 692 nodetagscache = {}
693 693 for t, n in self._tagscache.tags.iteritems():
694 694 nodetagscache.setdefault(n, []).append(t)
695 695 for tags in nodetagscache.itervalues():
696 696 tags.sort()
697 697 self._tagscache.nodetagscache = nodetagscache
698 698 return self._tagscache.nodetagscache.get(node, [])
699 699
700 700 def nodebookmarks(self, node):
701 701 marks = []
702 702 for bookmark, n in self._bookmarks.iteritems():
703 703 if n == node:
704 704 marks.append(bookmark)
705 705 return sorted(marks)
706 706
707 707 def branchmap(self):
708 708 '''returns a dictionary {branch: [branchheads]} with branchheads
709 709 ordered by increasing revision number'''
710 710 branchmap.updatecache(self)
711 711 return self._branchcaches[self.filtername]
712 712
713 713 def branchtip(self, branch):
714 714 '''return the tip node for a given branch'''
715 715 try:
716 716 return self.branchmap().branchtip(branch)
717 717 except KeyError:
718 718 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
719 719
720 720 def lookup(self, key):
721 721 return self[key].node()
722 722
723 723 def lookupbranch(self, key, remote=None):
724 724 repo = remote or self
725 725 if key in repo.branchmap():
726 726 return key
727 727
728 728 repo = (remote and remote.local()) and remote or self
729 729 return repo[key].branch()
730 730
731 731 def known(self, nodes):
732 732 nm = self.changelog.nodemap
733 733 pc = self._phasecache
734 734 result = []
735 735 for n in nodes:
736 736 r = nm.get(n)
737 737 resp = not (r is None or pc.phase(self, r) >= phases.secret)
738 738 result.append(resp)
739 739 return result
740 740
741 741 def local(self):
742 742 return self
743 743
744 744 def cancopy(self):
745 745 # so statichttprepo's override of local() works
746 746 if not self.local():
747 747 return False
748 748 if not self.ui.configbool('phases', 'publish', True):
749 749 return True
750 750 # if publishing we can't copy if there is filtered content
751 751 return not self.filtered('visible').changelog.filteredrevs
752 752
753 753 def join(self, f, *insidef):
754 754 return os.path.join(self.path, f, *insidef)
755 755
756 756 def wjoin(self, f, *insidef):
757 757 return os.path.join(self.root, f, *insidef)
758 758
759 759 def file(self, f):
760 760 if f[0] == '/':
761 761 f = f[1:]
762 762 return filelog.filelog(self.sopener, f)
763 763
764 764 def changectx(self, changeid):
765 765 return self[changeid]
766 766
767 767 def parents(self, changeid=None):
768 768 '''get list of changectxs for parents of changeid'''
769 769 return self[changeid].parents()
770 770
771 771 def setparents(self, p1, p2=nullid):
772 772 self.dirstate.beginparentchange()
773 773 copies = self.dirstate.setparents(p1, p2)
774 774 pctx = self[p1]
775 775 if copies:
776 776 # Adjust copy records, the dirstate cannot do it, it
777 777 # requires access to parents manifests. Preserve them
778 778 # only for entries added to first parent.
779 779 for f in copies:
780 780 if f not in pctx and copies[f] in pctx:
781 781 self.dirstate.copy(copies[f], f)
782 782 if p2 == nullid:
783 783 for f, s in sorted(self.dirstate.copies().items()):
784 784 if f not in pctx and s not in pctx:
785 785 self.dirstate.copy(None, f)
786 786 self.dirstate.endparentchange()
787 787
788 788 def filectx(self, path, changeid=None, fileid=None):
789 789 """changeid can be a changeset revision, node, or tag.
790 790 fileid can be a file revision or node."""
791 791 return context.filectx(self, path, changeid, fileid)
792 792
793 793 def getcwd(self):
794 794 return self.dirstate.getcwd()
795 795
796 796 def pathto(self, f, cwd=None):
797 797 return self.dirstate.pathto(f, cwd)
798 798
799 799 def wfile(self, f, mode='r'):
800 800 return self.wopener(f, mode)
801 801
802 802 def _link(self, f):
803 803 return self.wvfs.islink(f)
804 804
805 805 def _loadfilter(self, filter):
806 806 if filter not in self.filterpats:
807 807 l = []
808 808 for pat, cmd in self.ui.configitems(filter):
809 809 if cmd == '!':
810 810 continue
811 811 mf = matchmod.match(self.root, '', [pat])
812 812 fn = None
813 813 params = cmd
814 814 for name, filterfn in self._datafilters.iteritems():
815 815 if cmd.startswith(name):
816 816 fn = filterfn
817 817 params = cmd[len(name):].lstrip()
818 818 break
819 819 if not fn:
820 820 fn = lambda s, c, **kwargs: util.filter(s, c)
821 821 # Wrap old filters not supporting keyword arguments
822 822 if not inspect.getargspec(fn)[2]:
823 823 oldfn = fn
824 824 fn = lambda s, c, **kwargs: oldfn(s, c)
825 825 l.append((mf, fn, params))
826 826 self.filterpats[filter] = l
827 827 return self.filterpats[filter]
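# Configuration sketch for the filter machinery above: patterns and
# commands come from the hgrc section named after the filter ('encode' or
# 'decode', see _encodefilterpats/_decodefilterpats below). A hypothetical
# example -- the section name and 'tempfile:' prefix are real, the command
# itself is made up:
#
#     [encode]
#     **.txt = tempfile: mycleaner INFILE OUTFILE
#
# A command of '!' disables filtering for that pattern, as handled at the
# top of the loop.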
828 828
829 829 def _filter(self, filterpats, filename, data):
830 830 for mf, fn, cmd in filterpats:
831 831 if mf(filename):
832 832 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
833 833 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
834 834 break
835 835
836 836 return data
837 837
838 838 @unfilteredpropertycache
839 839 def _encodefilterpats(self):
840 840 return self._loadfilter('encode')
841 841
842 842 @unfilteredpropertycache
843 843 def _decodefilterpats(self):
844 844 return self._loadfilter('decode')
845 845
846 846 def adddatafilter(self, name, filter):
847 847 self._datafilters[name] = filter
848 848
849 849 def wread(self, filename):
850 850 if self._link(filename):
851 851 data = self.wvfs.readlink(filename)
852 852 else:
853 853 data = self.wopener.read(filename)
854 854 return self._filter(self._encodefilterpats, filename, data)
855 855
856 856 def wwrite(self, filename, data, flags):
857 857 data = self._filter(self._decodefilterpats, filename, data)
858 858 if 'l' in flags:
859 859 self.wopener.symlink(data, filename)
860 860 else:
861 861 self.wopener.write(filename, data)
862 862 if 'x' in flags:
863 863 self.wvfs.setflags(filename, False, True)
864 864
865 865 def wwritedata(self, filename, data):
866 866 return self._filter(self._decodefilterpats, filename, data)
867 867
868 868 def transaction(self, desc, report=None):
869 869 tr = self._transref and self._transref() or None
870 870 if tr and tr.running():
871 871 return tr.nest()
872 872
873 873 # abort here if the journal already exists
874 874 if self.svfs.exists("journal"):
875 875 raise error.RepoError(
876 876 _("abandoned transaction found"),
877 877 hint=_("run 'hg recover' to clean up transaction"))
878 878
879 879 def onclose():
880 880 self.store.write(self._transref())
881 881
882 882 self._writejournal(desc)
883 883 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
884 884 rp = report and report or self.ui.warn
885 885 tr = transaction.transaction(rp, self.sopener,
886 886 "journal",
887 887 aftertrans(renames),
888 888 self.store.createmode,
889 889 onclose)
890 890 self._transref = weakref.ref(tr)
891 891 return tr
892 892
893 893 def _journalfiles(self):
894 894 return ((self.svfs, 'journal'),
895 895 (self.vfs, 'journal.dirstate'),
896 896 (self.vfs, 'journal.branch'),
897 897 (self.vfs, 'journal.desc'),
898 898 (self.vfs, 'journal.bookmarks'),
899 899 (self.svfs, 'journal.phaseroots'))
900 900
901 901 def undofiles(self):
902 902 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
903 903
904 904 def _writejournal(self, desc):
905 905 self.opener.write("journal.dirstate",
906 906 self.opener.tryread("dirstate"))
907 907 self.opener.write("journal.branch",
908 908 encoding.fromlocal(self.dirstate.branch()))
909 909 self.opener.write("journal.desc",
910 910 "%d\n%s\n" % (len(self), desc))
911 911 self.opener.write("journal.bookmarks",
912 912 self.opener.tryread("bookmarks"))
913 913 self.sopener.write("journal.phaseroots",
914 914 self.sopener.tryread("phaseroots"))
915 915
916 916 def recover(self):
917 917 lock = self.lock()
918 918 try:
919 919 if self.svfs.exists("journal"):
920 920 self.ui.status(_("rolling back interrupted transaction\n"))
921 921 transaction.rollback(self.sopener, "journal",
922 922 self.ui.warn)
923 923 self.invalidate()
924 924 return True
925 925 else:
926 926 self.ui.warn(_("no interrupted transaction available\n"))
927 927 return False
928 928 finally:
929 929 lock.release()
930 930
931 931 def rollback(self, dryrun=False, force=False):
932 932 wlock = lock = None
933 933 try:
934 934 wlock = self.wlock()
935 935 lock = self.lock()
936 936 if self.svfs.exists("undo"):
937 937 return self._rollback(dryrun, force)
938 938 else:
939 939 self.ui.warn(_("no rollback information available\n"))
940 940 return 1
941 941 finally:
942 942 release(lock, wlock)
943 943
944 944 @unfilteredmethod # Until we get smarter cache management
945 945 def _rollback(self, dryrun, force):
946 946 ui = self.ui
947 947 try:
948 948 args = self.opener.read('undo.desc').splitlines()
949 949 (oldlen, desc, detail) = (int(args[0]), args[1], None)
950 950 if len(args) >= 3:
951 951 detail = args[2]
952 952 oldtip = oldlen - 1
953 953
954 954 if detail and ui.verbose:
955 955 msg = (_('repository tip rolled back to revision %s'
956 956 ' (undo %s: %s)\n')
957 957 % (oldtip, desc, detail))
958 958 else:
959 959 msg = (_('repository tip rolled back to revision %s'
960 960 ' (undo %s)\n')
961 961 % (oldtip, desc))
962 962 except IOError:
963 963 msg = _('rolling back unknown transaction\n')
964 964 desc = None
965 965
966 966 if not force and self['.'] != self['tip'] and desc == 'commit':
967 967 raise util.Abort(
968 968 _('rollback of last commit while not checked out '
969 969 'may lose data'), hint=_('use -f to force'))
970 970
971 971 ui.status(msg)
972 972 if dryrun:
973 973 return 0
974 974
975 975 parents = self.dirstate.parents()
976 976 self.destroying()
977 977 transaction.rollback(self.sopener, 'undo', ui.warn)
978 978 if self.vfs.exists('undo.bookmarks'):
979 979 self.vfs.rename('undo.bookmarks', 'bookmarks')
980 980 if self.svfs.exists('undo.phaseroots'):
981 981 self.svfs.rename('undo.phaseroots', 'phaseroots')
982 982 self.invalidate()
983 983
984 984 parentgone = (parents[0] not in self.changelog.nodemap or
985 985 parents[1] not in self.changelog.nodemap)
986 986 if parentgone:
987 987 self.vfs.rename('undo.dirstate', 'dirstate')
988 988 try:
989 989 branch = self.opener.read('undo.branch')
990 990 self.dirstate.setbranch(encoding.tolocal(branch))
991 991 except IOError:
992 992 ui.warn(_('named branch could not be reset: '
993 993 'current branch is still \'%s\'\n')
994 994 % self.dirstate.branch())
995 995
996 996 self.dirstate.invalidate()
997 997 parents = tuple([p.rev() for p in self.parents()])
998 998 if len(parents) > 1:
999 999 ui.status(_('working directory now based on '
1000 1000 'revisions %d and %d\n') % parents)
1001 1001 else:
1002 1002 ui.status(_('working directory now based on '
1003 1003 'revision %d\n') % parents)
1004 1004 # TODO: if we know which new heads may result from this rollback, pass
1005 1005 # them to destroy(), which will prevent the branchhead cache from being
1006 1006 # invalidated.
1007 1007 self.destroyed()
1008 1008 return 0
1009 1009
1010 1010 def invalidatecaches(self):
1011 1011
1012 1012 if '_tagscache' in vars(self):
1013 1013 # can't use delattr on proxy
1014 1014 del self.__dict__['_tagscache']
1015 1015
1016 1016 self.unfiltered()._branchcaches.clear()
1017 1017 self.invalidatevolatilesets()
1018 1018
1019 1019 def invalidatevolatilesets(self):
1020 1020 self.filteredrevcache.clear()
1021 1021 obsolete.clearobscaches(self)
1022 1022
1023 1023 def invalidatedirstate(self):
1024 1024 '''Invalidates the dirstate, causing the next call to dirstate
1025 1025 to check if it was modified since the last time it was read,
1026 1026 rereading it if it has.
1027 1027
1028 1028 This is different from dirstate.invalidate() in that it doesn't always
1029 1029 reread the dirstate. Use dirstate.invalidate() if you want to
1030 1030 explicitly read the dirstate again (i.e. restoring it to a previous
1031 1031 known good state).'''
1032 1032 if hasunfilteredcache(self, 'dirstate'):
1033 1033 for k in self.dirstate._filecache:
1034 1034 try:
1035 1035 delattr(self.dirstate, k)
1036 1036 except AttributeError:
1037 1037 pass
1038 1038 delattr(self.unfiltered(), 'dirstate')
1039 1039
1040 1040 def invalidate(self):
1041 1041 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1042 1042 for k in self._filecache:
1043 1043 # dirstate is invalidated separately in invalidatedirstate()
1044 1044 if k == 'dirstate':
1045 1045 continue
1046 1046
1047 1047 try:
1048 1048 delattr(unfiltered, k)
1049 1049 except AttributeError:
1050 1050 pass
1051 1051 self.invalidatecaches()
1052 1052 self.store.invalidatecaches()
1053 1053
1054 1054 def invalidateall(self):
1055 1055 '''Fully invalidates both store and non-store parts, causing the
1056 1056 subsequent operation to reread any outside changes.'''
1057 1057 # extension should hook this to invalidate its caches
1058 1058 self.invalidate()
1059 1059 self.invalidatedirstate()
1060 1060
1061 1061 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1062 1062 try:
1063 1063 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1064 1064 except error.LockHeld, inst:
1065 1065 if not wait:
1066 1066 raise
1067 1067 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1068 1068 (desc, inst.locker))
1069 1069 # default to 600 seconds timeout
1070 1070 l = lockmod.lock(vfs, lockname,
1071 1071 int(self.ui.config("ui", "timeout", "600")),
1072 1072 releasefn, desc=desc)
1073 1073 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1074 1074 if acquirefn:
1075 1075 acquirefn()
1076 1076 return l
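# Configuration sketch: the blocking retry above honours ui.timeout,
# defaulting to 600 seconds when unset, exactly as the ui.config call
# above reads it:
#
#     [ui]
#     timeout = 600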
1077 1077
1078 1078 def _afterlock(self, callback):
1079 1079 """add a callback to the current repository lock.
1080 1080
1081 1081 The callback will be executed on lock release."""
1082 1082 l = self._lockref and self._lockref()
1083 1083 if l:
1084 1084 l.postrelease.append(callback)
1085 1085 else:
1086 1086 callback()
1087 1087
1088 1088 def lock(self, wait=True):
1089 1089 '''Lock the repository store (.hg/store) and return a weak reference
1090 1090 to the lock. Use this before modifying the store (e.g. committing or
1091 1091 stripping). If you are opening a transaction, get a lock as well.'''
1092 1092 l = self._lockref and self._lockref()
1093 1093 if l is not None and l.held:
1094 1094 l.lock()
1095 1095 return l
1096 1096
1097 1097 def unlock():
1098 1098 for k, ce in self._filecache.items():
1099 1099 if k == 'dirstate' or k not in self.__dict__:
1100 1100 continue
1101 1101 ce.refresh()
1102 1102
1103 1103 l = self._lock(self.svfs, "lock", wait, unlock,
1104 1104 self.invalidate, _('repository %s') % self.origroot)
1105 1105 self._lockref = weakref.ref(l)
1106 1106 return l
1107 1107
1108 1108 def wlock(self, wait=True):
1109 1109 '''Lock the non-store parts of the repository (everything under
1110 1110 .hg except .hg/store) and return a weak reference to the lock.
1111 1111 Use this before modifying files in .hg.'''
1112 1112 l = self._wlockref and self._wlockref()
1113 1113 if l is not None and l.held:
1114 1114 l.lock()
1115 1115 return l
1116 1116
1117 1117 def unlock():
1118 1118 if self.dirstate.pendingparentchange():
1119 1119 self.dirstate.invalidate()
1120 1120 else:
1121 1121 self.dirstate.write()
1122 1122
1123 1123 self._filecache['dirstate'].refresh()
1124 1124
1125 1125 l = self._lock(self.vfs, "wlock", wait, unlock,
1126 1126 self.invalidatedirstate, _('working directory of %s') %
1127 1127 self.origroot)
1128 1128 self._wlockref = weakref.ref(l)
1129 1129 return l
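# Usage sketch: callers needing both locks take wlock before lock, as
# rollback() in this file does, and release them in reverse order:
#
#     wlock = lock = None
#     try:
#         wlock = repo.wlock()
#         lock = repo.lock()
#         # ... modify store and working copy ...
#     finally:
#         release(lock, wlock)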
1130 1130
1131 1131 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1132 1132 """
1133 1133 commit an individual file as part of a larger transaction
1134 1134 """
1135 1135
1136 1136 fname = fctx.path()
1137 1137 text = fctx.data()
1138 1138 flog = self.file(fname)
1139 1139 fparent1 = manifest1.get(fname, nullid)
1140 1140 fparent2 = manifest2.get(fname, nullid)
1141 1141
1142 1142 meta = {}
1143 1143 copy = fctx.renamed()
1144 1144 if copy and copy[0] != fname:
1145 1145 # Mark the new revision of this file as a copy of another
1146 1146 # file. This copy data will effectively act as a parent
1147 1147 # of this new revision. If this is a merge, the first
1148 1148 # parent will be the nullid (meaning "look up the copy data")
1149 1149 # and the second one will be the other parent. For example:
1150 1150 #
1151 1151 # 0 --- 1 --- 3 rev1 changes file foo
1152 1152 # \ / rev2 renames foo to bar and changes it
1153 1153 # \- 2 -/ rev3 should have bar with all changes and
1154 1154 # should record that bar descends from
1155 1155 # bar in rev2 and foo in rev1
1156 1156 #
1157 1157 # this allows this merge to succeed:
1158 1158 #
1159 1159 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1160 1160 # \ / merging rev3 and rev4 should use bar@rev2
1161 1161 # \- 2 --- 4 as the merge base
1162 1162 #
1163 1163
1164 1164 cfname = copy[0]
1165 1165 crev = manifest1.get(cfname)
1166 1166 newfparent = fparent2
1167 1167
1168 1168 if manifest2: # branch merge
1169 1169 if fparent2 == nullid or crev is None: # copied on remote side
1170 1170 if cfname in manifest2:
1171 1171 crev = manifest2[cfname]
1172 1172 newfparent = fparent1
1173 1173
1174 1174 # find source in nearest ancestor if we've lost track
1175 1175 if not crev:
1176 1176 self.ui.debug(" %s: searching for copy revision for %s\n" %
1177 1177 (fname, cfname))
1178 1178 for ancestor in self[None].ancestors():
1179 1179 if cfname in ancestor:
1180 1180 crev = ancestor[cfname].filenode()
1181 1181 break
1182 1182
1183 1183 if crev:
1184 1184 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1185 1185 meta["copy"] = cfname
1186 1186 meta["copyrev"] = hex(crev)
1187 1187 fparent1, fparent2 = nullid, newfparent
1188 1188 else:
1189 1189 self.ui.warn(_("warning: can't find ancestor for '%s' "
1190 1190 "copied from '%s'!\n") % (fname, cfname))
1191 1191
1192 1192 elif fparent1 == nullid:
1193 1193 fparent1, fparent2 = fparent2, nullid
1194 1194 elif fparent2 != nullid:
1195 1195 # is one parent an ancestor of the other?
1196 1196 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1197 1197 if fparent1 in fparentancestors:
1198 1198 fparent1, fparent2 = fparent2, nullid
1199 1199 elif fparent2 in fparentancestors:
1200 1200 fparent2 = nullid
1201 1201
1202 1202 # is the file changed?
1203 1203 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1204 1204 changelist.append(fname)
1205 1205 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1206 1206 # are just the flags changed during merge?
1207 1207 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1208 1208 changelist.append(fname)
1209 1209
1210 1210 return fparent1
1211 1211
1212 1212 @unfilteredmethod
1213 1213 def commit(self, text="", user=None, date=None, match=None, force=False,
1214 1214 editor=False, extra={}):
1215 1215 """Add a new revision to current repository.
1216 1216
1217 1217 Revision information is gathered from the working directory,
1218 1218 match can be used to filter the committed files. If editor is
1219 1219 supplied, it is called to get a commit message.
1220 1220 """
1221 1221
1222 1222 def fail(f, msg):
1223 1223 raise util.Abort('%s: %s' % (f, msg))
1224 1224
1225 1225 if not match:
1226 1226 match = matchmod.always(self.root, '')
1227 1227
1228 1228 if not force:
1229 1229 vdirs = []
1230 1230 match.explicitdir = vdirs.append
1231 1231 match.bad = fail
1232 1232
1233 1233 wlock = self.wlock()
1234 1234 try:
1235 1235 wctx = self[None]
1236 1236 merge = len(wctx.parents()) > 1
1237 1237
1238 1238 if (not force and merge and match and
1239 1239 (match.files() or match.anypats())):
1240 1240 raise util.Abort(_('cannot partially commit a merge '
1241 1241 '(do not specify files or patterns)'))
1242 1242
1243 1243 status = self.status(match=match, clean=force)
1244 1244 if force:
1245 1245 status.modified.extend(status.clean) # mq may commit clean files
1246 1246
1247 1247 # check subrepos
1248 1248 subs = []
1249 1249 commitsubs = set()
1250 1250 newstate = wctx.substate.copy()
1251 1251 # only manage subrepos and .hgsubstate if .hgsub is present
1252 1252 if '.hgsub' in wctx:
1253 1253 # we'll decide whether to track this ourselves, thanks
1254 1254 for c in status.modified, status.added, status.removed:
1255 1255 if '.hgsubstate' in c:
1256 1256 c.remove('.hgsubstate')
1257 1257
1258 1258 # compare current state to last committed state
1259 1259 # build new substate based on last committed state
1260 1260 oldstate = wctx.p1().substate
1261 1261 for s in sorted(newstate.keys()):
1262 1262 if not match(s):
1263 1263 # ignore working copy, use old state if present
1264 1264 if s in oldstate:
1265 1265 newstate[s] = oldstate[s]
1266 1266 continue
1267 1267 if not force:
1268 1268 raise util.Abort(
1269 1269 _("commit with new subrepo %s excluded") % s)
1270 1270 if wctx.sub(s).dirty(True):
1271 1271 if not self.ui.configbool('ui', 'commitsubrepos'):
1272 1272 raise util.Abort(
1273 1273 _("uncommitted changes in subrepo %s") % s,
1274 1274 hint=_("use --subrepos for recursive commit"))
1275 1275 subs.append(s)
1276 1276 commitsubs.add(s)
1277 1277 else:
1278 1278 bs = wctx.sub(s).basestate()
1279 1279 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1280 1280 if oldstate.get(s, (None, None, None))[1] != bs:
1281 1281 subs.append(s)
1282 1282
1283 1283 # check for removed subrepos
1284 1284 for p in wctx.parents():
1285 1285 r = [s for s in p.substate if s not in newstate]
1286 1286 subs += [s for s in r if match(s)]
1287 1287 if subs:
1288 1288 if (not match('.hgsub') and
1289 1289 '.hgsub' in (wctx.modified() + wctx.added())):
1290 1290 raise util.Abort(
1291 1291 _("can't commit subrepos without .hgsub"))
1292 1292 status.modified.insert(0, '.hgsubstate')
1293 1293
1294 1294 elif '.hgsub' in status.removed:
1295 1295 # clean up .hgsubstate when .hgsub is removed
1296 1296 if ('.hgsubstate' in wctx and
1297 1297 '.hgsubstate' not in (status.modified + status.added +
1298 1298 status.removed)):
1299 1299 status.removed.insert(0, '.hgsubstate')
1300 1300
1301 1301 # make sure all explicit patterns are matched
1302 1302 if not force and match.files():
1303 1303 matched = set(status.modified + status.added + status.removed)
1304 1304
1305 1305 for f in match.files():
1306 1306 f = self.dirstate.normalize(f)
1307 1307 if f == '.' or f in matched or f in wctx.substate:
1308 1308 continue
1309 1309 if f in status.deleted:
1310 1310 fail(f, _('file not found!'))
1311 1311 if f in vdirs: # visited directory
1312 1312 d = f + '/'
1313 1313 for mf in matched:
1314 1314 if mf.startswith(d):
1315 1315 break
1316 1316 else:
1317 1317 fail(f, _("no match under directory!"))
1318 1318 elif f not in self.dirstate:
1319 1319 fail(f, _("file not tracked!"))
1320 1320
1321 1321 cctx = context.workingctx(self, text, user, date, extra, status)
1322 1322
1323 1323 if (not force and not extra.get("close") and not merge
1324 1324 and not cctx.files()
1325 1325 and wctx.branch() == wctx.p1().branch()):
1326 1326 return None
1327 1327
1328 1328 if merge and cctx.deleted():
1329 1329 raise util.Abort(_("cannot commit merge with missing files"))
1330 1330
1331 1331 ms = mergemod.mergestate(self)
1332 1332 for f in status.modified:
1333 1333 if f in ms and ms[f] == 'u':
1334 1334 raise util.Abort(_("unresolved merge conflicts "
1335 1335 "(see hg help resolve)"))
1336 1336
1337 1337 if editor:
1338 1338 cctx._text = editor(self, cctx, subs)
1339 1339 edited = (text != cctx._text)
1340 1340
1341 1341 # Save commit message in case this transaction gets rolled back
1342 1342 # (e.g. by a pretxncommit hook). Leave the content alone on
1343 1343 # the assumption that the user will use the same editor again.
1344 1344 msgfn = self.savecommitmessage(cctx._text)
1345 1345
1346 1346 # commit subs and write new state
1347 1347 if subs:
1348 1348 for s in sorted(commitsubs):
1349 1349 sub = wctx.sub(s)
1350 1350 self.ui.status(_('committing subrepository %s\n') %
1351 1351 subrepo.subrelpath(sub))
1352 1352 sr = sub.commit(cctx._text, user, date)
1353 1353 newstate[s] = (newstate[s][0], sr)
1354 1354 subrepo.writestate(self, newstate)
1355 1355
1356 1356 p1, p2 = self.dirstate.parents()
1357 1357 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1358 1358 try:
1359 1359 self.hook("precommit", throw=True, parent1=hookp1,
1360 1360 parent2=hookp2)
1361 1361 ret = self.commitctx(cctx, True)
1362 1362 except: # re-raises
1363 1363 if edited:
1364 1364 self.ui.write(
1365 1365 _('note: commit message saved in %s\n') % msgfn)
1366 1366 raise
1367 1367
1368 1368 # update bookmarks, dirstate and mergestate
1369 1369 bookmarks.update(self, [p1, p2], ret)
1370 1370 cctx.markcommitted(ret)
1371 1371 ms.reset()
1372 1372 finally:
1373 1373 wlock.release()
1374 1374
1375 1375 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1376 1376 # hack for commands that use a temporary commit (e.g. histedit):
1377 1377 # the temporary commit may have been stripped before the hook runs
1378 1378 if node in self:
1379 1379 self.hook("commit", node=node, parent1=parent1,
1380 1380 parent2=parent2)
1381 1381 self._afterlock(commithook)
1382 1382 return ret
1383 1383
1384 1384 @unfilteredmethod
1385 1385 def commitctx(self, ctx, error=False):
1386 1386 """Add a new revision to current repository.
1387 1387 Revision information is passed via the context argument.
1388 1388 """
1389 1389
1390 1390 tr = None
1391 1391 p1, p2 = ctx.p1(), ctx.p2()
1392 1392 user = ctx.user()
1393 1393
1394 1394 lock = self.lock()
1395 1395 try:
1396 1396 tr = self.transaction("commit")
1397 1397 trp = weakref.proxy(tr)
1398 1398
1399 1399 if ctx.files():
1400 1400 m1 = p1.manifest()
1401 1401 m2 = p2.manifest()
1402 1402 m = m1.copy()
1403 1403
1404 1404 # check in files
1405 1405 added = []
1406 1406 changed = []
1407 1407 removed = list(ctx.removed())
1408 1408 linkrev = len(self)
1409 1409 for f in sorted(ctx.modified() + ctx.added()):
1410 1410 self.ui.note(f + "\n")
1411 1411 try:
1412 1412 fctx = ctx[f]
1413 1413 if fctx is None:
1414 1414 removed.append(f)
1415 1415 else:
1416 1416 added.append(f)
1417 1417 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1418 1418 trp, changed)
1419 1419 m.setflag(f, fctx.flags())
1420 1420 except OSError, inst:
1421 1421 self.ui.warn(_("trouble committing %s!\n") % f)
1422 1422 raise
1423 1423 except IOError, inst:
1424 1424 errcode = getattr(inst, 'errno', errno.ENOENT)
1425 1425 if error or errcode and errcode != errno.ENOENT:
1426 1426 self.ui.warn(_("trouble committing %s!\n") % f)
1427 1427 raise
1428 1428
1429 1429 # update manifest
1430 1430 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1431 1431 drop = [f for f in removed if f in m]
1432 1432 for f in drop:
1433 1433 del m[f]
1434 1434 mn = self.manifest.add(m, trp, linkrev,
1435 1435 p1.manifestnode(), p2.manifestnode(),
1436 1436 added, drop)
1437 1437 files = changed + removed
1438 1438 else:
1439 1439 mn = p1.manifestnode()
1440 1440 files = []
1441 1441
1442 1442 # update changelog
1443 1443 self.changelog.delayupdate(tr)
1444 1444 n = self.changelog.add(mn, files, ctx.description(),
1445 1445 trp, p1.node(), p2.node(),
1446 1446 user, ctx.date(), ctx.extra().copy())
1447 1447 p = lambda: tr.writepending() and self.root or ""
1448 1448 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1449 1449 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1450 1450 parent2=xp2, pending=p)
1451 1451 # set the new commit in its proper phase
1452 1452 targetphase = subrepo.newcommitphase(self.ui, ctx)
1453 1453 if targetphase:
1454 1454 # retracting the boundary does not alter parent changesets.
1455 1455 # if a parent has a higher phase, the resulting phase will
1456 1456 # be compliant anyway
1457 1457 #
1458 1458 # if minimal phase was 0 we don't need to retract anything
1459 1459 phases.retractboundary(self, tr, targetphase, [n])
1460 1460 tr.close()
1461 1461 branchmap.updatecache(self.filtered('served'))
1462 1462 return n
1463 1463 finally:
1464 1464 if tr:
1465 1465 tr.release()
1466 1466 lock.release()
1467 1467
1468 1468 @unfilteredmethod
1469 1469 def destroying(self):
1470 1470 '''Inform the repository that nodes are about to be destroyed.
1471 1471 Intended for use by strip and rollback, so there's a common
1472 1472 place for anything that has to be done before destroying history.
1473 1473
1474 1474 This is mostly useful for saving state that is in memory and waiting
1475 1475 to be flushed when the current lock is released. Because a call to
1476 1476 destroyed is imminent, the repo will be invalidated causing those
1477 1477 changes to stay in memory (waiting for the next unlock), or vanish
1478 1478 completely.
1479 1479 '''
1480 1480 # When using the same lock to commit and strip, the phasecache is left
1481 1481 # dirty after committing. Then when we strip, the repo is invalidated,
1482 1482 # causing those changes to disappear.
1483 1483 if '_phasecache' in vars(self):
1484 1484 self._phasecache.write()
1485 1485
1486 1486 @unfilteredmethod
1487 1487 def destroyed(self):
1488 1488 '''Inform the repository that nodes have been destroyed.
1489 1489 Intended for use by strip and rollback, so there's a common
1490 1490 place for anything that has to be done after destroying history.
1491 1491 '''
1492 1492 # When one tries to:
1493 1493 # 1) destroy nodes thus calling this method (e.g. strip)
1494 1494 # 2) use phasecache somewhere (e.g. commit)
1495 1495 #
1496 1496 # then 2) will fail because the phasecache contains nodes that were
1497 1497 # removed. We can either remove phasecache from the filecache,
1498 1498 # causing it to reload next time it is accessed, or simply filter
1499 1499 # the removed nodes now and write the updated cache.
1500 1500 self._phasecache.filterunknown(self)
1501 1501 self._phasecache.write()
1502 1502
1503 1503 # update the 'served' branch cache to help read-only server processes.
1504 1504 # Thanks to branchcache collaboration, this is done from the nearest
1505 1505 # filtered subset and is expected to be fast.
1506 1506 branchmap.updatecache(self.filtered('served'))
1507 1507
1508 1508 # Ensure the persistent tag cache is updated. Doing it now
1509 1509 # means that the tag cache only has to worry about destroyed
1510 1510 # heads immediately after a strip/rollback. That in turn
1511 1511 # guarantees that "cachetip == currenttip" (comparing both rev
1512 1512 # and node) always means no nodes have been added or destroyed.
1513 1513
1514 1514 # XXX this is suboptimal when qrefresh'ing: we strip the current
1515 1515 # head, refresh the tag cache, then immediately add a new head.
1516 1516 # But I think doing it this way is necessary for the "instant
1517 1517 # tag cache retrieval" case to work.
1518 1518 self.invalidate()
1519 1519
1520 1520 def walk(self, match, node=None):
1521 1521 '''
1522 1522 walk recursively through the directory tree or a given
1523 1523 changeset, finding all files matched by the match
1524 1524 function
1525 1525 '''
1526 1526 return self[node].walk(match)
1527 1527
1528 1528 def status(self, node1='.', node2=None, match=None,
1529 1529 ignored=False, clean=False, unknown=False,
1530 1530 listsubrepos=False):
1531 1531 '''a convenience method that calls node1.status(node2)'''
1532 1532 return self[node1].status(node2, match, ignored, clean, unknown,
1533 1533 listsubrepos)
1534 1534
1535 1535 def heads(self, start=None):
1536 1536 heads = self.changelog.heads(start)
1537 1537 # sort the output in rev descending order
1538 1538 return sorted(heads, key=self.changelog.rev, reverse=True)
1539 1539
1540 1540 def branchheads(self, branch=None, start=None, closed=False):
1541 1541 '''return a (possibly filtered) list of heads for the given branch
1542 1542
1543 1543 Heads are returned in topological order, from newest to oldest.
1544 1544 If branch is None, use the dirstate branch.
1545 1545 If start is not None, return only heads reachable from start.
1546 1546 If closed is True, return heads that are marked as closed as well.
1547 1547 '''
1548 1548 if branch is None:
1549 1549 branch = self[None].branch()
1550 1550 branches = self.branchmap()
1551 1551 if branch not in branches:
1552 1552 return []
1553 1553 # the cache returns heads ordered lowest to highest
1554 1554 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1555 1555 if start is not None:
1556 1556 # filter out the heads that cannot be reached from startrev
1557 1557 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1558 1558 bheads = [h for h in bheads if h in fbheads]
1559 1559 return bheads
1560 1560
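Editorial note: a small, hedged example of branchheads() in use; the branch
name is illustrative:

    from mercurial.node import hex
    for h in repo.branchheads('default'):   # newest to oldest, open heads only
        print repo.changelog.rev(h), hex(h)[:12]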
1561 1561 def branches(self, nodes):
1562 1562 if not nodes:
1563 1563 nodes = [self.changelog.tip()]
1564 1564 b = []
1565 1565 for n in nodes:
1566 1566 t = n
1567 1567 while True:
1568 1568 p = self.changelog.parents(n)
1569 1569 if p[1] != nullid or p[0] == nullid:
1570 1570 b.append((t, n, p[0], p[1]))
1571 1571 break
1572 1572 n = p[0]
1573 1573 return b
1574 1574
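Editorial reading aid for branches() above (a restatement of the loop, not
upstream documentation):

    # For each starting node t, the result contains one tuple (t, n, p1, p2)
    # where n is the first ancestor of t, following first parents, that is
    # either a merge (p[1] != nullid) or a root (p[0] == nullid).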
1575 1575 def between(self, pairs):
1576 1576 r = []
1577 1577
1578 1578 for top, bottom in pairs:
1579 1579 n, l, i = top, [], 0
1580 1580 f = 1
1581 1581
1582 1582 while n != bottom and n != nullid:
1583 1583 p = self.changelog.parents(n)[0]
1584 1584 if i == f:
1585 1585 l.append(n)
1586 1586 f = f * 2
1587 1587 n = p
1588 1588 i += 1
1589 1589
1590 1590 r.append(l)
1591 1591
1592 1592 return r
1593 1593
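Editorial note: the loop above samples the first-parent chain at exponentially
spaced distances (i == 1, 2, 4, 8, ...), which is what the legacy wire-protocol
discovery expects. A worked illustration, assuming a linear history where n10
sits ten first-parent steps above n0:

    # repo.between([(n10, n0)]) records the current node whenever the step
    # counter i reaches f = 1, 2, 4, 8, so it returns
    #     [[n9, n8, n6, n2]]
    # i.e. the nodes 1, 2, 4 and 8 steps below the top of the pair.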
1594 1594 def checkpush(self, pushop):
1595 1595 """Extensions can override this function if additional checks have
1596 1596 to be performed before pushing, or call it if they override push
1597 1597 command.
1598 1598 """
1599 1599 pass
1600 1600
1601 1601 @unfilteredpropertycache
1602 1602 def prepushoutgoinghooks(self):
1603 1603 """Return util.hooks consists of "(repo, remote, outgoing)"
1604 1604 functions, which are called before pushing changesets.
1605 1605 """
1606 1606 return util.hooks()
1607 1607
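Editorial note: a hedged sketch of how an extension might use the two hook
points above; the extension hook and its name are invented for illustration:

    from mercurial import util

    def reposetup(ui, repo):
        def checkoutgoing(local, remote, outgoing):
            # refuse oversized pushes before any data is sent
            if len(outgoing.missing) > 100:
                raise util.Abort('refusing to push more than 100 changesets')
        repo.prepushoutgoinghooks.add('sizecheck', checkoutgoing)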
1608 1608 def stream_in(self, remote, requirements):
1609 1609 lock = self.lock()
1610 1610 try:
1611 1611 # Save remote branchmap. We will use it later
1612 1612 # to speed up branchcache creation
1613 1613 rbranchmap = None
1614 1614 if remote.capable("branchmap"):
1615 1615 rbranchmap = remote.branchmap()
1616 1616
1617 1617 fp = remote.stream_out()
1618 1618 l = fp.readline()
1619 1619 try:
1620 1620 resp = int(l)
1621 1621 except ValueError:
1622 1622 raise error.ResponseError(
1623 1623 _('unexpected response from remote server:'), l)
1624 1624 if resp == 1:
1625 1625 raise util.Abort(_('operation forbidden by server'))
1626 1626 elif resp == 2:
1627 1627 raise util.Abort(_('locking the remote repository failed'))
1628 1628 elif resp != 0:
1629 1629 raise util.Abort(_('the server sent an unknown error code'))
1630 1630 self.ui.status(_('streaming all changes\n'))
1631 1631 l = fp.readline()
1632 1632 try:
1633 1633 total_files, total_bytes = map(int, l.split(' ', 1))
1634 1634 except (ValueError, TypeError):
1635 1635 raise error.ResponseError(
1636 1636 _('unexpected response from remote server:'), l)
1637 1637 self.ui.status(_('%d files to transfer, %s of data\n') %
1638 1638 (total_files, util.bytecount(total_bytes)))
1639 1639 handled_bytes = 0
1640 1640 self.ui.progress(_('clone'), 0, total=total_bytes)
1641 1641 start = time.time()
1642 1642
1643 1643 tr = self.transaction(_('clone'))
1644 1644 try:
1645 1645 for i in xrange(total_files):
1646 1646 # XXX doesn't support '\n' or '\r' in filenames
1647 1647 l = fp.readline()
1648 1648 try:
1649 1649 name, size = l.split('\0', 1)
1650 1650 size = int(size)
1651 1651 except (ValueError, TypeError):
1652 1652 raise error.ResponseError(
1653 1653 _('unexpected response from remote server:'), l)
1654 1654 if self.ui.debugflag:
1655 1655 self.ui.debug('adding %s (%s)\n' %
1656 1656 (name, util.bytecount(size)))
1657 1657 # for backwards compat, name was partially encoded
1658 1658 ofp = self.sopener(store.decodedir(name), 'w')
1659 1659 for chunk in util.filechunkiter(fp, limit=size):
1660 1660 handled_bytes += len(chunk)
1661 1661 self.ui.progress(_('clone'), handled_bytes,
1662 1662 total=total_bytes)
1663 1663 ofp.write(chunk)
1664 1664 ofp.close()
1665 1665 tr.close()
1666 1666 finally:
1667 1667 tr.release()
1668 1668
1669 1669 # Writing straight to files circumvented the in-memory caches
1670 1670 self.invalidate()
1671 1671
1672 1672 elapsed = time.time() - start
1673 1673 if elapsed <= 0:
1674 1674 elapsed = 0.001
1675 1675 self.ui.progress(_('clone'), None)
1676 1676 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1677 1677 (util.bytecount(total_bytes), elapsed,
1678 1678 util.bytecount(total_bytes / elapsed)))
1679 1679
1680 1680 # new requirements = old non-format requirements +
1681 1681 # new format-related requirements
1682 1682 # from the streamed-in repository
1683 1683 requirements.update(set(self.requirements) - self.supportedformats)
1684 1684 self._applyrequirements(requirements)
1685 1685 self._writerequirements()
1686 1686
1687 1687 if rbranchmap:
1688 1688 rbheads = []
1689 1689 closed = []
1690 1690 for bheads in rbranchmap.itervalues():
1691 1691 rbheads.extend(bheads)
1692 1692 for h in bheads:
1693 1693 r = self.changelog.rev(h)
1694 1694 b, c = self.changelog.branchinfo(r)
1695 1695 if c:
1696 1696 closed.append(h)
1697 1697
1698 1698 if rbheads:
1699 1699 rtiprev = max((int(self.changelog.rev(node))
1700 1700 for node in rbheads))
1701 1701 cache = branchmap.branchcache(rbranchmap,
1702 1702 self[rtiprev].node(),
1703 1703 rtiprev,
1704 1704 closednodes=closed)
1705 1705 # Try to stick it as low as possible;
1706 1706 # filters above 'served' are unlikely to be fetched from a clone
1707 1707 for candidate in ('base', 'immutable', 'served'):
1708 1708 rview = self.filtered(candidate)
1709 1709 if cache.validfor(rview):
1710 1710 self._branchcaches[candidate] = cache
1711 1711 cache.write(rview)
1712 1712 break
1713 1713 self.invalidate()
1714 1714 return len(self.heads()) + 1
1715 1715 finally:
1716 1716 lock.release()
1717 1717
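Editorial note for readers of stream_in: the body it parses is line-oriented.
A hedged summary of the format as consumed by the code above (derived from the
parsing logic, not a protocol spec):

    <status>\n                     # 0 ok, 1 operation forbidden, 2 lock failed
    <total files> <total bytes>\n
    # then, for each file:
    <store path>\0<size>\n
    <size raw bytes of revlog data>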
1718 1718 def clone(self, remote, heads=[], stream=False):
1719 1719 '''clone remote repository.
1720 1720
1721 1721 keyword arguments:
1722 1722 heads: list of revs to clone (forces use of pull)
1723 1723 stream: use streaming clone if possible'''
1724 1724
1725 1725 # now, all clients that can request uncompressed clones can
1726 1726 # read repo formats supported by all servers that can serve
1727 1727 # them.
1728 1728
1729 1729 # if revlog format changes, client will have to check version
1730 1730 # and format flags on "stream" capability, and use
1731 1731 # uncompressed only if compatible.
1732 1732
1733 1733 if not stream:
1734 1734 # if the server explicitly prefers to stream (for fast LANs)
1735 1735 stream = remote.capable('stream-preferred')
1736 1736
1737 1737 if stream and not heads:
1738 1738 # 'stream' means remote revlog format is revlogv1 only
1739 1739 if remote.capable('stream'):
1740 1740 self.stream_in(remote, set(('revlogv1',)))
1741 1741 else:
1742 1742 # otherwise, 'streamreqs' contains the remote revlog format
1743 1743 streamreqs = remote.capable('streamreqs')
1744 1744 if streamreqs:
1745 1745 streamreqs = set(streamreqs.split(','))
1746 1746 # if we support it, stream in and adjust our requirements
1747 1747 if not streamreqs - self.supportedformats:
1748 1748 self.stream_in(remote, streamreqs)
1749 1749
1750 1750 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1751 1751 try:
1752 1752 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1753 1753 ret = exchange.pull(self, remote, heads).cgresult
1754 1754 finally:
1755 1755 self.ui.restoreconfig(quiet)
1756 1756 return ret
1757 1757
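Editorial note: a hedged usage sketch of clone(); hg.peer() is the
conventional way to obtain the remote peer, and the URL is illustrative:

    from mercurial import hg
    remote = hg.peer(repo.ui, {}, 'http://example.com/repo')
    repo.clone(remote, heads=[], stream=True)   # stream if the server allows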
1758 1758 def pushkey(self, namespace, key, old, new):
1759 1759 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1760 1760 old=old, new=new)
1761 1761 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1762 1762 ret = pushkey.push(self, namespace, key, old, new)
1763 1763 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1764 1764 ret=ret)
1765 1765 return ret
1766 1766
1767 1767 def listkeys(self, namespace):
1768 1768 self.hook('prelistkeys', throw=True, namespace=namespace)
1769 1769 self.ui.debug('listing keys for "%s"\n' % namespace)
1770 1770 values = pushkey.list(self, namespace)
1771 1771 self.hook('listkeys', namespace=namespace, values=values)
1772 1772 return values
1773 1773
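Editorial note: a hedged example of the pushkey wrappers above; 'bookmarks' is
one of the standard namespaces, and newhex is a hypothetical hex node supplied
by the caller:

    marks = repo.listkeys('bookmarks')    # {bookmark name: hex node}
    repo.pushkey('bookmarks', 'stable', marks.get('stable', ''), newhex)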
1774 1774 def debugwireargs(self, one, two, three=None, four=None, five=None):
1775 1775 '''used to test argument passing over the wire'''
1776 1776 return "%s %s %s %s %s" % (one, two, three, four, five)
1777 1777
1778 1778 def savecommitmessage(self, text):
1779 1779 fp = self.opener('last-message.txt', 'wb')
1780 1780 try:
1781 1781 fp.write(text)
1782 1782 finally:
1783 1783 fp.close()
1784 1784 return self.pathto(fp.name[len(self.root) + 1:])
1785 1785
1786 1786 # used to avoid circular references so destructors work
1787 1787 def aftertrans(files):
1788 1788 renamefiles = [tuple(t) for t in files]
1789 1789 def a():
1790 1790 for vfs, src, dest in renamefiles:
1791 1791 try:
1792 1792 vfs.rename(src, dest)
1793 1793 except OSError: # journal file does not yet exist
1794 1794 pass
1795 1795 return a
1796 1796
1797 1797 def undoname(fn):
1798 1798 base, name = os.path.split(fn)
1799 1799 assert name.startswith('journal')
1800 1800 return os.path.join(base, name.replace('journal', 'undo', 1))
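Editorial note: a quick illustration of undoname(), per the implementation
above:

    undoname('.hg/store/journal')        # -> '.hg/store/undo'
    undoname('.hg/journal.dirstate')     # -> '.hg/undo.dirstate'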
1801 1801
1802 1802 def instance(ui, path, create):
1803 1803 return localrepository(ui, util.urllocalpath(path), create)
1804 1804
1805 1805 def islocal(path):
1806 1806 return True
@@ -1,81 +1,81 b''
1 1 $ hg init debugrevlog
2 2 $ cd debugrevlog
3 3 $ echo a > a
4 4 $ hg ci -Am adda
5 5 adding a
6 6 $ hg debugrevlog -m
7 7 format : 1
8 8 flags : inline
9 9
10 10 revisions : 1
11 11 merges : 0 ( 0.00%)
12 12 normal : 1 (100.00%)
13 13 revisions : 1
14 14 full : 1 (100.00%)
15 15 deltas : 0 ( 0.00%)
16 16 revision size : 44
17 17 full : 44 (100.00%)
18 18 deltas : 0 ( 0.00%)
19 19
20 20 avg chain length : 0
21 21 compression ratio : 0
22 22
23 23 uncompressed data size (min/max/avg) : 43 / 43 / 43
24 24 full revision size (min/max/avg) : 44 / 44 / 44
25 25 delta size (min/max/avg) : 0 / 0 / 0
26 26
27 27 Test max chain len
28 28 $ cat >> $HGRCPATH << EOF
29 > [revlog]
29 > [format]
30 30 > maxchainlen=4
31 31 > EOF
32 32
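Editorial note: the hunk above reflects the rename this commit performs; the
option now lives in the [format] section rather than [revlog], so a user
configuration would read (illustrative hgrc):

    [format]
    maxchainlen = 4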
33 $ echo "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
33 $ printf "This test checks if maxchainlen config value is respected also it can serve as basic test for debugrevlog -d <file>.\n" >> a
34 34 $ hg ci -m a
35 $ echo "b\n" >> a
35 $ printf "b\n" >> a
36 36 $ hg ci -m a
37 $ echo "c\n" >> a
37 $ printf "c\n" >> a
38 38 $ hg ci -m a
39 $ echo "d\n" >> a
39 $ printf "d\n" >> a
40 40 $ hg ci -m a
41 $ echo "e\n" >> a
41 $ printf "e\n" >> a
42 42 $ hg ci -m a
43 $ echo "f\n" >> a
43 $ printf "f\n" >> a
44 44 $ hg ci -m a
45 $ echo 'g\n' >> a
45 $ printf 'g\n' >> a
46 46 $ hg ci -m a
47 $ echo 'h\n' >> a
47 $ printf 'h\n' >> a
48 48 $ hg ci -m a
49 49 $ hg debugrevlog -d a
50 50 # rev p1rev p2rev start end deltastart base p1 p2 rawsize totalsize compression heads chainlen
51 51 0 -1 -1 0 ??? 0 0 0 0 ??? ???? ? 1 0 (glob)
52 52 1 0 -1 ??? ??? 0 0 0 0 ??? ???? ? 1 1 (glob)
53 53 2 1 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
54 54 3 2 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
55 55 4 3 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 4 (glob)
56 56 5 4 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 0 (glob)
57 57 6 5 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 1 (glob)
58 58 7 6 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 2 (glob)
59 59 8 7 -1 ??? ??? ??? ??? ??? 0 ??? ???? ? 1 3 (glob)
60 60 $ cd ..
61 61
62 62 Test internal debugstacktrace command
63 63
64 64 $ cat > debugstacktrace.py << EOF
65 65 > from mercurial.util import debugstacktrace, dst, sys
66 66 > def f():
67 67 > dst('hello world')
68 68 > def g():
69 69 > f()
70 70 > debugstacktrace(skip=-5, f=sys.stdout)
71 71 > g()
72 72 > EOF
73 73 $ python debugstacktrace.py
74 74 hello world at:
75 75 debugstacktrace.py:7 in * (glob)
76 76 debugstacktrace.py:5 in g
77 77 debugstacktrace.py:3 in f
78 78 stacktrace at:
79 79 debugstacktrace.py:7 *in * (glob)
80 80 debugstacktrace.py:6 *in g (glob)
81 81 */util.py:* in debugstacktrace (glob)