revlog: add an aggressivemergedelta option...
Durham Goode
r26118:049005de default
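The only modification this changeset makes to localrepo.py is the hunk at new lines 357-360 below: _applyopenerreqs() now reads the experimental format.aggressivemergedeltas setting with configbool and forwards it through svfs.options, the same channel already used for chunkcachesize, maxchainlen and manifestcachesize, so the revlog layer can pick it up. Below is a minimal, purely illustrative sketch of how a consumer of that option might use it, assuming (as the option name suggests) that it makes delta computation for merge revisions consider both parents; the class and method names are hypothetical, not Mercurial's actual revlog API.

# Illustrative sketch only: shows how a revlog-like consumer could honour the
# option that _applyopenerreqs() stores on svfs.options.  'revlogsketch' and
# '_choosedelta' are hypothetical names, not Mercurial's revlog API.
class revlogsketch(object):
    def __init__(self, opener):
        opts = getattr(opener, 'options', None) or {}
        # mirrors the configbool(..., False) default used in localrepo
        self._aggressivemergedeltas = opts.get('aggressivemergedeltas', False)

    def _choosedelta(self, deltaagainstp1, deltaagainstp2):
        # with the option enabled, deltas against both merge parents are
        # considered and the smaller one wins; otherwise keep the old
        # behaviour of deltaing against the first parent only
        if self._aggressivemergedeltas and deltaagainstp2 is not None:
            return min((deltaagainstp1, deltaagainstp2), key=len)
        return deltaagainstp1

Turning the behaviour on for a repository would then be a matter of setting aggressivemergedeltas = True in the [format] section of an hgrc file, which is what the configbool('format', 'aggressivemergedeltas', False) call in the hunk reads.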
@@ -1,1952 +1,1956 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7 from node import hex, nullid, wdirrev, short
8 8 from i18n import _
9 9 import urllib
10 10 import peer, changegroup, subrepo, pushkey, obsolete, repoview
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks, phases
12 12 import lock as lockmod
13 13 import transaction, store, encoding, exchange, bundle2
14 14 import scmutil, util, extensions, hook, error, revset
15 15 import match as matchmod
16 16 import merge as mergemod
17 17 import tags as tagsmod
18 18 from lock import release
19 19 import weakref, errno, os, time, inspect, random
20 20 import branchmap, pathutil
21 21 import namespaces
22 22 propertycache = util.propertycache
23 23 filecache = scmutil.filecache
24 24
25 25 class repofilecache(filecache):
26 26 """All filecache usage on repo are done for logic that should be unfiltered
27 27 """
28 28
29 29 def __get__(self, repo, type=None):
30 30 return super(repofilecache, self).__get__(repo.unfiltered(), type)
31 31 def __set__(self, repo, value):
32 32 return super(repofilecache, self).__set__(repo.unfiltered(), value)
33 33 def __delete__(self, repo):
34 34 return super(repofilecache, self).__delete__(repo.unfiltered())
35 35
36 36 class storecache(repofilecache):
37 37 """filecache for files in the store"""
38 38 def join(self, obj, fname):
39 39 return obj.sjoin(fname)
40 40
41 41 class unfilteredpropertycache(propertycache):
42 42 """propertycache that apply to unfiltered repo only"""
43 43
44 44 def __get__(self, repo, type=None):
45 45 unfi = repo.unfiltered()
46 46 if unfi is repo:
47 47 return super(unfilteredpropertycache, self).__get__(unfi)
48 48 return getattr(unfi, self.name)
49 49
50 50 class filteredpropertycache(propertycache):
51 51 """propertycache that must take filtering in account"""
52 52
53 53 def cachevalue(self, obj, value):
54 54 object.__setattr__(obj, self.name, value)
55 55
56 56
57 57 def hasunfilteredcache(repo, name):
58 58 """check if a repo has an unfilteredpropertycache value for <name>"""
59 59 return name in vars(repo.unfiltered())
60 60
61 61 def unfilteredmethod(orig):
62 62 """decorate method that always need to be run on unfiltered version"""
63 63 def wrapper(repo, *args, **kwargs):
64 64 return orig(repo.unfiltered(), *args, **kwargs)
65 65 return wrapper
66 66
67 67 moderncaps = set(('lookup', 'branchmap', 'pushkey', 'known', 'getbundle',
68 68 'unbundle'))
69 69 legacycaps = moderncaps.union(set(['changegroupsubset']))
70 70
71 71 class localpeer(peer.peerrepository):
72 72 '''peer for a local repo; reflects only the most recent API'''
73 73
74 74 def __init__(self, repo, caps=moderncaps):
75 75 peer.peerrepository.__init__(self)
76 76 self._repo = repo.filtered('served')
77 77 self.ui = repo.ui
78 78 self._caps = repo._restrictcapabilities(caps)
79 79 self.requirements = repo.requirements
80 80 self.supportedformats = repo.supportedformats
81 81
82 82 def close(self):
83 83 self._repo.close()
84 84
85 85 def _capabilities(self):
86 86 return self._caps
87 87
88 88 def local(self):
89 89 return self._repo
90 90
91 91 def canpush(self):
92 92 return True
93 93
94 94 def url(self):
95 95 return self._repo.url()
96 96
97 97 def lookup(self, key):
98 98 return self._repo.lookup(key)
99 99
100 100 def branchmap(self):
101 101 return self._repo.branchmap()
102 102
103 103 def heads(self):
104 104 return self._repo.heads()
105 105
106 106 def known(self, nodes):
107 107 return self._repo.known(nodes)
108 108
109 109 def getbundle(self, source, heads=None, common=None, bundlecaps=None,
110 110 **kwargs):
111 111 cg = exchange.getbundle(self._repo, source, heads=heads,
112 112 common=common, bundlecaps=bundlecaps, **kwargs)
113 113 if bundlecaps is not None and 'HG20' in bundlecaps:
114 114 # When requesting a bundle2, getbundle returns a stream to make the
115 115 # wire level function happier. We need to build a proper object
116 116 # from it in local peer.
117 117 cg = bundle2.getunbundler(self.ui, cg)
118 118 return cg
119 119
120 120 # TODO We might want to move the next two calls into legacypeer and add
121 121 # unbundle instead.
122 122
123 123 def unbundle(self, cg, heads, url):
124 124 """apply a bundle on a repo
125 125
126 126 This function handles the repo locking itself."""
127 127 try:
128 128 try:
129 129 cg = exchange.readbundle(self.ui, cg, None)
130 130 ret = exchange.unbundle(self._repo, cg, heads, 'push', url)
131 131 if util.safehasattr(ret, 'getchunks'):
132 132 # This is a bundle20 object, turn it into an unbundler.
133 133 # This little dance should be dropped eventually when the
134 134 # API is finally improved.
135 135 stream = util.chunkbuffer(ret.getchunks())
136 136 ret = bundle2.getunbundler(self.ui, stream)
137 137 return ret
138 138 except Exception as exc:
139 139 # If the exception contains output salvaged from a bundle2
140 140 # reply, we need to make sure it is printed before continuing
141 141 # to fail. So we build a bundle2 with such output and consume
142 142 # it directly.
143 143 #
144 144 # This is not very elegant but allows a "simple" solution for
145 145 # issue4594
146 146 output = getattr(exc, '_bundle2salvagedoutput', ())
147 147 if output:
148 148 bundler = bundle2.bundle20(self._repo.ui)
149 149 for out in output:
150 150 bundler.addpart(out)
151 151 stream = util.chunkbuffer(bundler.getchunks())
152 152 b = bundle2.getunbundler(self.ui, stream)
153 153 bundle2.processbundle(self._repo, b)
154 154 raise
155 155 except error.PushRaced as exc:
156 156 raise error.ResponseError(_('push failed:'), str(exc))
157 157
158 158 def lock(self):
159 159 return self._repo.lock()
160 160
161 161 def addchangegroup(self, cg, source, url):
162 162 return changegroup.addchangegroup(self._repo, cg, source, url)
163 163
164 164 def pushkey(self, namespace, key, old, new):
165 165 return self._repo.pushkey(namespace, key, old, new)
166 166
167 167 def listkeys(self, namespace):
168 168 return self._repo.listkeys(namespace)
169 169
170 170 def debugwireargs(self, one, two, three=None, four=None, five=None):
171 171 '''used to test argument passing over the wire'''
172 172 return "%s %s %s %s %s" % (one, two, three, four, five)
173 173
174 174 class locallegacypeer(localpeer):
175 175 '''peer extension which implements legacy methods too; used for tests with
176 176 restricted capabilities'''
177 177
178 178 def __init__(self, repo):
179 179 localpeer.__init__(self, repo, caps=legacycaps)
180 180
181 181 def branches(self, nodes):
182 182 return self._repo.branches(nodes)
183 183
184 184 def between(self, pairs):
185 185 return self._repo.between(pairs)
186 186
187 187 def changegroup(self, basenodes, source):
188 188 return changegroup.changegroup(self._repo, basenodes, source)
189 189
190 190 def changegroupsubset(self, bases, heads, source):
191 191 return changegroup.changegroupsubset(self._repo, bases, heads, source)
192 192
193 193 class localrepository(object):
194 194
195 195 supportedformats = set(('revlogv1', 'generaldelta', 'treemanifest',
196 196 'manifestv2'))
197 197 _basesupported = supportedformats | set(('store', 'fncache', 'shared',
198 198 'dotencode'))
199 199 openerreqs = set(('revlogv1', 'generaldelta', 'treemanifest', 'manifestv2'))
200 200 filtername = None
201 201
202 202 # a list of (ui, featureset) functions.
203 203 # only functions defined in modules of enabled extensions are invoked
204 204 featuresetupfuncs = set()
205 205
206 206 def _baserequirements(self, create):
207 207 return ['revlogv1']
208 208
209 209 def __init__(self, baseui, path=None, create=False):
210 210 self.requirements = set()
211 211 self.wvfs = scmutil.vfs(path, expandpath=True, realpath=True)
212 212 self.wopener = self.wvfs
213 213 self.root = self.wvfs.base
214 214 self.path = self.wvfs.join(".hg")
215 215 self.origroot = path
216 216 self.auditor = pathutil.pathauditor(self.root, self._checknested)
217 217 self.vfs = scmutil.vfs(self.path)
218 218 self.opener = self.vfs
219 219 self.baseui = baseui
220 220 self.ui = baseui.copy()
221 221 self.ui.copy = baseui.copy # prevent copying repo configuration
222 222 # A list of callbacks to shape the phase if no data were found.
223 223 # Callbacks are in the form: func(repo, roots) --> processed root.
224 224 # This list is to be filled by extensions during repo setup
225 225 self._phasedefaults = []
226 226 try:
227 227 self.ui.readconfig(self.join("hgrc"), self.root)
228 228 extensions.loadall(self.ui)
229 229 except IOError:
230 230 pass
231 231
232 232 if self.featuresetupfuncs:
233 233 self.supported = set(self._basesupported) # use private copy
234 234 extmods = set(m.__name__ for n, m
235 235 in extensions.extensions(self.ui))
236 236 for setupfunc in self.featuresetupfuncs:
237 237 if setupfunc.__module__ in extmods:
238 238 setupfunc(self.ui, self.supported)
239 239 else:
240 240 self.supported = self._basesupported
241 241
242 242 if not self.vfs.isdir():
243 243 if create:
244 244 if not self.wvfs.exists():
245 245 self.wvfs.makedirs()
246 246 self.vfs.makedir(notindexed=True)
247 247 self.requirements.update(self._baserequirements(create))
248 248 if self.ui.configbool('format', 'usestore', True):
249 249 self.vfs.mkdir("store")
250 250 self.requirements.add("store")
251 251 if self.ui.configbool('format', 'usefncache', True):
252 252 self.requirements.add("fncache")
253 253 if self.ui.configbool('format', 'dotencode', True):
254 254 self.requirements.add('dotencode')
255 255 # create an invalid changelog
256 256 self.vfs.append(
257 257 "00changelog.i",
258 258 '\0\0\0\2' # represents revlogv2
259 259 ' dummy changelog to prevent using the old repo layout'
260 260 )
261 261 # experimental config: format.generaldelta
262 262 if self.ui.configbool('format', 'generaldelta', False):
263 263 self.requirements.add("generaldelta")
264 264 if self.ui.configbool('experimental', 'treemanifest', False):
265 265 self.requirements.add("treemanifest")
266 266 if self.ui.configbool('experimental', 'manifestv2', False):
267 267 self.requirements.add("manifestv2")
268 268 else:
269 269 raise error.RepoError(_("repository %s not found") % path)
270 270 elif create:
271 271 raise error.RepoError(_("repository %s already exists") % path)
272 272 else:
273 273 try:
274 274 self.requirements = scmutil.readrequires(
275 275 self.vfs, self.supported)
276 276 except IOError as inst:
277 277 if inst.errno != errno.ENOENT:
278 278 raise
279 279
280 280 self.sharedpath = self.path
281 281 try:
282 282 vfs = scmutil.vfs(self.vfs.read("sharedpath").rstrip('\n'),
283 283 realpath=True)
284 284 s = vfs.base
285 285 if not vfs.exists():
286 286 raise error.RepoError(
287 287 _('.hg/sharedpath points to nonexistent directory %s') % s)
288 288 self.sharedpath = s
289 289 except IOError as inst:
290 290 if inst.errno != errno.ENOENT:
291 291 raise
292 292
293 293 self.store = store.store(
294 294 self.requirements, self.sharedpath, scmutil.vfs)
295 295 self.spath = self.store.path
296 296 self.svfs = self.store.vfs
297 297 self.sjoin = self.store.join
298 298 self.vfs.createmode = self.store.createmode
299 299 self._applyopenerreqs()
300 300 if create:
301 301 self._writerequirements()
302 302
303 303
304 304 self._branchcaches = {}
305 305 self._revbranchcache = None
306 306 self.filterpats = {}
307 307 self._datafilters = {}
308 308 self._transref = self._lockref = self._wlockref = None
309 309
310 310 # A cache for various files under .hg/ that tracks file changes,
311 311 # (used by the filecache decorator)
312 312 #
313 313 # Maps a property name to its util.filecacheentry
314 314 self._filecache = {}
315 315
316 316 # hold sets of revision to be filtered
317 317 # should be cleared when something might have changed the filter value:
318 318 # - new changesets,
319 319 # - phase change,
320 320 # - new obsolescence marker,
321 321 # - working directory parent change,
322 322 # - bookmark changes
323 323 self.filteredrevcache = {}
324 324
325 325 # generic mapping between names and nodes
326 326 self.names = namespaces.namespaces()
327 327
328 328 def close(self):
329 329 self._writecaches()
330 330
331 331 def _writecaches(self):
332 332 if self._revbranchcache:
333 333 self._revbranchcache.write()
334 334
335 335 def _restrictcapabilities(self, caps):
336 336 if self.ui.configbool('experimental', 'bundle2-advertise', True):
337 337 caps = set(caps)
338 338 capsblob = bundle2.encodecaps(bundle2.getrepocaps(self))
339 339 caps.add('bundle2=' + urllib.quote(capsblob))
340 340 return caps
341 341
342 342 def _applyopenerreqs(self):
343 343 self.svfs.options = dict((r, 1) for r in self.requirements
344 344 if r in self.openerreqs)
345 345 # experimental config: format.chunkcachesize
346 346 chunkcachesize = self.ui.configint('format', 'chunkcachesize')
347 347 if chunkcachesize is not None:
348 348 self.svfs.options['chunkcachesize'] = chunkcachesize
349 349 # experimental config: format.maxchainlen
350 350 maxchainlen = self.ui.configint('format', 'maxchainlen')
351 351 if maxchainlen is not None:
352 352 self.svfs.options['maxchainlen'] = maxchainlen
353 353 # experimental config: format.manifestcachesize
354 354 manifestcachesize = self.ui.configint('format', 'manifestcachesize')
355 355 if manifestcachesize is not None:
356 356 self.svfs.options['manifestcachesize'] = manifestcachesize
357 # experimental config: format.aggressivemergedeltas
358 aggressivemergedeltas = self.ui.configbool('format',
359 'aggressivemergedeltas', False)
360 self.svfs.options['aggressivemergedeltas'] = aggressivemergedeltas
357 361
358 362 def _writerequirements(self):
359 363 scmutil.writerequires(self.vfs, self.requirements)
360 364
361 365 def _checknested(self, path):
362 366 """Determine if path is a legal nested repository."""
363 367 if not path.startswith(self.root):
364 368 return False
365 369 subpath = path[len(self.root) + 1:]
366 370 normsubpath = util.pconvert(subpath)
367 371
368 372 # XXX: Checking against the current working copy is wrong in
369 373 # the sense that it can reject things like
370 374 #
371 375 # $ hg cat -r 10 sub/x.txt
372 376 #
373 377 # if sub/ is no longer a subrepository in the working copy
374 378 # parent revision.
375 379 #
376 380 # However, it can of course also allow things that would have
377 381 # been rejected before, such as the above cat command if sub/
378 382 # is a subrepository now, but was a normal directory before.
379 383 # The old path auditor would have rejected by mistake since it
380 384 # panics when it sees sub/.hg/.
381 385 #
382 386 # All in all, checking against the working copy seems sensible
383 387 # since we want to prevent access to nested repositories on
384 388 # the filesystem *now*.
385 389 ctx = self[None]
386 390 parts = util.splitpath(subpath)
387 391 while parts:
388 392 prefix = '/'.join(parts)
389 393 if prefix in ctx.substate:
390 394 if prefix == normsubpath:
391 395 return True
392 396 else:
393 397 sub = ctx.sub(prefix)
394 398 return sub.checknested(subpath[len(prefix) + 1:])
395 399 else:
396 400 parts.pop()
397 401 return False
398 402
399 403 def peer(self):
400 404 return localpeer(self) # not cached to avoid reference cycle
401 405
402 406 def unfiltered(self):
403 407 """Return unfiltered version of the repository
404 408
405 409 Intended to be overwritten by filtered repo."""
406 410 return self
407 411
408 412 def filtered(self, name):
409 413 """Return a filtered version of a repository"""
410 414 # build a new class with the mixin and the current class
411 415 # (possibly subclass of the repo)
412 416 class proxycls(repoview.repoview, self.unfiltered().__class__):
413 417 pass
414 418 return proxycls(self, name)
415 419
416 420 @repofilecache('bookmarks')
417 421 def _bookmarks(self):
418 422 return bookmarks.bmstore(self)
419 423
420 424 @repofilecache('bookmarks.current')
421 425 def _activebookmark(self):
422 426 return bookmarks.readactive(self)
423 427
424 428 def bookmarkheads(self, bookmark):
425 429 name = bookmark.split('@', 1)[0]
426 430 heads = []
427 431 for mark, n in self._bookmarks.iteritems():
428 432 if mark.split('@', 1)[0] == name:
429 433 heads.append(n)
430 434 return heads
431 435
432 436 @storecache('phaseroots')
433 437 def _phasecache(self):
434 438 return phases.phasecache(self, self._phasedefaults)
435 439
436 440 @storecache('obsstore')
437 441 def obsstore(self):
438 442 # read default format for new obsstore.
439 443 # developer config: format.obsstore-version
440 444 defaultformat = self.ui.configint('format', 'obsstore-version', None)
441 445 # rely on obsstore class default when possible.
442 446 kwargs = {}
443 447 if defaultformat is not None:
444 448 kwargs['defaultformat'] = defaultformat
445 449 readonly = not obsolete.isenabled(self, obsolete.createmarkersopt)
446 450 store = obsolete.obsstore(self.svfs, readonly=readonly,
447 451 **kwargs)
448 452 if store and readonly:
449 453 self.ui.warn(
450 454 _('obsolete feature not enabled but %i markers found!\n')
451 455 % len(list(store)))
452 456 return store
453 457
454 458 @storecache('00changelog.i')
455 459 def changelog(self):
456 460 c = changelog.changelog(self.svfs)
457 461 if 'HG_PENDING' in os.environ:
458 462 p = os.environ['HG_PENDING']
459 463 if p.startswith(self.root):
460 464 c.readpending('00changelog.i.a')
461 465 return c
462 466
463 467 @storecache('00manifest.i')
464 468 def manifest(self):
465 469 return manifest.manifest(self.svfs)
466 470
467 471 def dirlog(self, dir):
468 472 return self.manifest.dirlog(dir)
469 473
470 474 @repofilecache('dirstate')
471 475 def dirstate(self):
472 476 warned = [0]
473 477 def validate(node):
474 478 try:
475 479 self.changelog.rev(node)
476 480 return node
477 481 except error.LookupError:
478 482 if not warned[0]:
479 483 warned[0] = True
480 484 self.ui.warn(_("warning: ignoring unknown"
481 485 " working parent %s!\n") % short(node))
482 486 return nullid
483 487
484 488 return dirstate.dirstate(self.vfs, self.ui, self.root, validate)
485 489
486 490 def __getitem__(self, changeid):
487 491 if changeid is None or changeid == wdirrev:
488 492 return context.workingctx(self)
489 493 if isinstance(changeid, slice):
490 494 return [context.changectx(self, i)
491 495 for i in xrange(*changeid.indices(len(self)))
492 496 if i not in self.changelog.filteredrevs]
493 497 return context.changectx(self, changeid)
494 498
495 499 def __contains__(self, changeid):
496 500 try:
497 501 self[changeid]
498 502 return True
499 503 except error.RepoLookupError:
500 504 return False
501 505
502 506 def __nonzero__(self):
503 507 return True
504 508
505 509 def __len__(self):
506 510 return len(self.changelog)
507 511
508 512 def __iter__(self):
509 513 return iter(self.changelog)
510 514
511 515 def revs(self, expr, *args):
512 516 '''Return a list of revisions matching the given revset'''
513 517 expr = revset.formatspec(expr, *args)
514 518 m = revset.match(None, expr)
515 519 return m(self)
516 520
517 521 def set(self, expr, *args):
518 522 '''
519 523 Yield a context for each matching revision, after doing arg
520 524 replacement via revset.formatspec
521 525 '''
522 526 for r in self.revs(expr, *args):
523 527 yield self[r]
524 528
525 529 def url(self):
526 530 return 'file:' + self.root
527 531
528 532 def hook(self, name, throw=False, **args):
529 533 """Call a hook, passing this repo instance.
530 534
531 535 This a convenience method to aid invoking hooks. Extensions likely
532 536 won't call this unless they have registered a custom hook or are
533 537 replacing code that is expected to call a hook.
534 538 """
535 539 return hook.hook(self.ui, self, name, throw, **args)
536 540
537 541 @unfilteredmethod
538 542 def _tag(self, names, node, message, local, user, date, extra={},
539 543 editor=False):
540 544 if isinstance(names, str):
541 545 names = (names,)
542 546
543 547 branches = self.branchmap()
544 548 for name in names:
545 549 self.hook('pretag', throw=True, node=hex(node), tag=name,
546 550 local=local)
547 551 if name in branches:
548 552 self.ui.warn(_("warning: tag %s conflicts with existing"
549 553 " branch name\n") % name)
550 554
551 555 def writetags(fp, names, munge, prevtags):
552 556 fp.seek(0, 2)
553 557 if prevtags and prevtags[-1] != '\n':
554 558 fp.write('\n')
555 559 for name in names:
556 560 if munge:
557 561 m = munge(name)
558 562 else:
559 563 m = name
560 564
561 565 if (self._tagscache.tagtypes and
562 566 name in self._tagscache.tagtypes):
563 567 old = self.tags().get(name, nullid)
564 568 fp.write('%s %s\n' % (hex(old), m))
565 569 fp.write('%s %s\n' % (hex(node), m))
566 570 fp.close()
567 571
568 572 prevtags = ''
569 573 if local:
570 574 try:
571 575 fp = self.vfs('localtags', 'r+')
572 576 except IOError:
573 577 fp = self.vfs('localtags', 'a')
574 578 else:
575 579 prevtags = fp.read()
576 580
577 581 # local tags are stored in the current charset
578 582 writetags(fp, names, None, prevtags)
579 583 for name in names:
580 584 self.hook('tag', node=hex(node), tag=name, local=local)
581 585 return
582 586
583 587 try:
584 588 fp = self.wfile('.hgtags', 'rb+')
585 589 except IOError as e:
586 590 if e.errno != errno.ENOENT:
587 591 raise
588 592 fp = self.wfile('.hgtags', 'ab')
589 593 else:
590 594 prevtags = fp.read()
591 595
592 596 # committed tags are stored in UTF-8
593 597 writetags(fp, names, encoding.fromlocal, prevtags)
594 598
595 599 fp.close()
596 600
597 601 self.invalidatecaches()
598 602
599 603 if '.hgtags' not in self.dirstate:
600 604 self[None].add(['.hgtags'])
601 605
602 606 m = matchmod.exact(self.root, '', ['.hgtags'])
603 607 tagnode = self.commit(message, user, date, extra=extra, match=m,
604 608 editor=editor)
605 609
606 610 for name in names:
607 611 self.hook('tag', node=hex(node), tag=name, local=local)
608 612
609 613 return tagnode
610 614
611 615 def tag(self, names, node, message, local, user, date, editor=False):
612 616 '''tag a revision with one or more symbolic names.
613 617
614 618 names is a list of strings or, when adding a single tag, names may be a
615 619 string.
616 620
617 621 if local is True, the tags are stored in a per-repository file.
618 622 otherwise, they are stored in the .hgtags file, and a new
619 623 changeset is committed with the change.
620 624
621 625 keyword arguments:
622 626
623 627 local: whether to store tags in non-version-controlled file
624 628 (default False)
625 629
626 630 message: commit message to use if committing
627 631
628 632 user: name of user to use if committing
629 633
630 634 date: date tuple to use if committing'''
631 635
632 636 if not local:
633 637 m = matchmod.exact(self.root, '', ['.hgtags'])
634 638 if any(self.status(match=m, unknown=True, ignored=True)):
635 639 raise util.Abort(_('working copy of .hgtags is changed'),
636 640 hint=_('please commit .hgtags manually'))
637 641
638 642 self.tags() # instantiate the cache
639 643 self._tag(names, node, message, local, user, date, editor=editor)
640 644
641 645 @filteredpropertycache
642 646 def _tagscache(self):
643 647 '''Returns a tagscache object that contains various tags related
644 648 caches.'''
645 649
646 650 # This simplifies its cache management by having one decorated
647 651 # function (this one) and the rest simply fetch things from it.
648 652 class tagscache(object):
649 653 def __init__(self):
650 654 # These two define the set of tags for this repository. tags
651 655 # maps tag name to node; tagtypes maps tag name to 'global' or
652 656 # 'local'. (Global tags are defined by .hgtags across all
653 657 # heads, and local tags are defined in .hg/localtags.)
654 658 # They constitute the in-memory cache of tags.
655 659 self.tags = self.tagtypes = None
656 660
657 661 self.nodetagscache = self.tagslist = None
658 662
659 663 cache = tagscache()
660 664 cache.tags, cache.tagtypes = self._findtags()
661 665
662 666 return cache
663 667
664 668 def tags(self):
665 669 '''return a mapping of tag to node'''
666 670 t = {}
667 671 if self.changelog.filteredrevs:
668 672 tags, tt = self._findtags()
669 673 else:
670 674 tags = self._tagscache.tags
671 675 for k, v in tags.iteritems():
672 676 try:
673 677 # ignore tags to unknown nodes
674 678 self.changelog.rev(v)
675 679 t[k] = v
676 680 except (error.LookupError, ValueError):
677 681 pass
678 682 return t
679 683
680 684 def _findtags(self):
681 685 '''Do the hard work of finding tags. Return a pair of dicts
682 686 (tags, tagtypes) where tags maps tag name to node, and tagtypes
683 687 maps tag name to a string like \'global\' or \'local\'.
684 688 Subclasses or extensions are free to add their own tags, but
685 689 should be aware that the returned dicts will be retained for the
686 690 duration of the localrepo object.'''
687 691
688 692 # XXX what tagtype should subclasses/extensions use? Currently
689 693 # mq and bookmarks add tags, but do not set the tagtype at all.
690 694 # Should each extension invent its own tag type? Should there
691 695 # be one tagtype for all such "virtual" tags? Or is the status
692 696 # quo fine?
693 697
694 698 alltags = {} # map tag name to (node, hist)
695 699 tagtypes = {}
696 700
697 701 tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
698 702 tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)
699 703
700 704 # Build the return dicts. Have to re-encode tag names because
701 705 # the tags module always uses UTF-8 (in order not to lose info
702 706 # writing to the cache), but the rest of Mercurial wants them in
703 707 # local encoding.
704 708 tags = {}
705 709 for (name, (node, hist)) in alltags.iteritems():
706 710 if node != nullid:
707 711 tags[encoding.tolocal(name)] = node
708 712 tags['tip'] = self.changelog.tip()
709 713 tagtypes = dict([(encoding.tolocal(name), value)
710 714 for (name, value) in tagtypes.iteritems()])
711 715 return (tags, tagtypes)
712 716
713 717 def tagtype(self, tagname):
714 718 '''
715 719 return the type of the given tag. result can be:
716 720
717 721 'local' : a local tag
718 722 'global' : a global tag
719 723 None : tag does not exist
720 724 '''
721 725
722 726 return self._tagscache.tagtypes.get(tagname)
723 727
724 728 def tagslist(self):
725 729 '''return a list of tags ordered by revision'''
726 730 if not self._tagscache.tagslist:
727 731 l = []
728 732 for t, n in self.tags().iteritems():
729 733 l.append((self.changelog.rev(n), t, n))
730 734 self._tagscache.tagslist = [(t, n) for r, t, n in sorted(l)]
731 735
732 736 return self._tagscache.tagslist
733 737
734 738 def nodetags(self, node):
735 739 '''return the tags associated with a node'''
736 740 if not self._tagscache.nodetagscache:
737 741 nodetagscache = {}
738 742 for t, n in self._tagscache.tags.iteritems():
739 743 nodetagscache.setdefault(n, []).append(t)
740 744 for tags in nodetagscache.itervalues():
741 745 tags.sort()
742 746 self._tagscache.nodetagscache = nodetagscache
743 747 return self._tagscache.nodetagscache.get(node, [])
744 748
745 749 def nodebookmarks(self, node):
746 750 marks = []
747 751 for bookmark, n in self._bookmarks.iteritems():
748 752 if n == node:
749 753 marks.append(bookmark)
750 754 return sorted(marks)
751 755
752 756 def branchmap(self):
753 757 '''returns a dictionary {branch: [branchheads]} with branchheads
754 758 ordered by increasing revision number'''
755 759 branchmap.updatecache(self)
756 760 return self._branchcaches[self.filtername]
757 761
758 762 @unfilteredmethod
759 763 def revbranchcache(self):
760 764 if not self._revbranchcache:
761 765 self._revbranchcache = branchmap.revbranchcache(self.unfiltered())
762 766 return self._revbranchcache
763 767
764 768 def branchtip(self, branch, ignoremissing=False):
765 769 '''return the tip node for a given branch
766 770
767 771 If ignoremissing is True, then this method will not raise an error.
768 772 This is helpful for callers that only expect None for a missing branch
769 773 (e.g. namespace).
770 774
771 775 '''
772 776 try:
773 777 return self.branchmap().branchtip(branch)
774 778 except KeyError:
775 779 if not ignoremissing:
776 780 raise error.RepoLookupError(_("unknown branch '%s'") % branch)
777 781 else:
778 782 pass
779 783
780 784 def lookup(self, key):
781 785 return self[key].node()
782 786
783 787 def lookupbranch(self, key, remote=None):
784 788 repo = remote or self
785 789 if key in repo.branchmap():
786 790 return key
787 791
788 792 repo = (remote and remote.local()) and remote or self
789 793 return repo[key].branch()
790 794
791 795 def known(self, nodes):
792 796 nm = self.changelog.nodemap
793 797 pc = self._phasecache
794 798 result = []
795 799 for n in nodes:
796 800 r = nm.get(n)
797 801 resp = not (r is None or pc.phase(self, r) >= phases.secret)
798 802 result.append(resp)
799 803 return result
800 804
801 805 def local(self):
802 806 return self
803 807
804 808 def publishing(self):
805 809 # it's safe (and desirable) to trust the publish flag unconditionally
806 810 # so that we don't finalize changes shared between users via ssh or nfs
807 811 return self.ui.configbool('phases', 'publish', True, untrusted=True)
808 812
809 813 def cancopy(self):
810 814 # so statichttprepo's override of local() works
811 815 if not self.local():
812 816 return False
813 817 if not self.publishing():
814 818 return True
815 819 # if publishing we can't copy if there is filtered content
816 820 return not self.filtered('visible').changelog.filteredrevs
817 821
818 822 def shared(self):
819 823 '''the type of shared repository (None if not shared)'''
820 824 if self.sharedpath != self.path:
821 825 return 'store'
822 826 return None
823 827
824 828 def join(self, f, *insidef):
825 829 return self.vfs.join(os.path.join(f, *insidef))
826 830
827 831 def wjoin(self, f, *insidef):
828 832 return self.vfs.reljoin(self.root, f, *insidef)
829 833
830 834 def file(self, f):
831 835 if f[0] == '/':
832 836 f = f[1:]
833 837 return filelog.filelog(self.svfs, f)
834 838
835 839 def changectx(self, changeid):
836 840 return self[changeid]
837 841
838 842 def parents(self, changeid=None):
839 843 '''get list of changectxs for parents of changeid'''
840 844 return self[changeid].parents()
841 845
842 846 def setparents(self, p1, p2=nullid):
843 847 self.dirstate.beginparentchange()
844 848 copies = self.dirstate.setparents(p1, p2)
845 849 pctx = self[p1]
846 850 if copies:
847 851 # Adjust copy records, the dirstate cannot do it, it
848 852 # requires access to parents manifests. Preserve them
849 853 # only for entries added to first parent.
850 854 for f in copies:
851 855 if f not in pctx and copies[f] in pctx:
852 856 self.dirstate.copy(copies[f], f)
853 857 if p2 == nullid:
854 858 for f, s in sorted(self.dirstate.copies().items()):
855 859 if f not in pctx and s not in pctx:
856 860 self.dirstate.copy(None, f)
857 861 self.dirstate.endparentchange()
858 862
859 863 def filectx(self, path, changeid=None, fileid=None):
860 864 """changeid can be a changeset revision, node, or tag.
861 865 fileid can be a file revision or node."""
862 866 return context.filectx(self, path, changeid, fileid)
863 867
864 868 def getcwd(self):
865 869 return self.dirstate.getcwd()
866 870
867 871 def pathto(self, f, cwd=None):
868 872 return self.dirstate.pathto(f, cwd)
869 873
870 874 def wfile(self, f, mode='r'):
871 875 return self.wvfs(f, mode)
872 876
873 877 def _link(self, f):
874 878 return self.wvfs.islink(f)
875 879
876 880 def _loadfilter(self, filter):
877 881 if filter not in self.filterpats:
878 882 l = []
879 883 for pat, cmd in self.ui.configitems(filter):
880 884 if cmd == '!':
881 885 continue
882 886 mf = matchmod.match(self.root, '', [pat])
883 887 fn = None
884 888 params = cmd
885 889 for name, filterfn in self._datafilters.iteritems():
886 890 if cmd.startswith(name):
887 891 fn = filterfn
888 892 params = cmd[len(name):].lstrip()
889 893 break
890 894 if not fn:
891 895 fn = lambda s, c, **kwargs: util.filter(s, c)
892 896 # Wrap old filters not supporting keyword arguments
893 897 if not inspect.getargspec(fn)[2]:
894 898 oldfn = fn
895 899 fn = lambda s, c, **kwargs: oldfn(s, c)
896 900 l.append((mf, fn, params))
897 901 self.filterpats[filter] = l
898 902 return self.filterpats[filter]
899 903
900 904 def _filter(self, filterpats, filename, data):
901 905 for mf, fn, cmd in filterpats:
902 906 if mf(filename):
903 907 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
904 908 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
905 909 break
906 910
907 911 return data
908 912
909 913 @unfilteredpropertycache
910 914 def _encodefilterpats(self):
911 915 return self._loadfilter('encode')
912 916
913 917 @unfilteredpropertycache
914 918 def _decodefilterpats(self):
915 919 return self._loadfilter('decode')
916 920
917 921 def adddatafilter(self, name, filter):
918 922 self._datafilters[name] = filter
919 923
920 924 def wread(self, filename):
921 925 if self._link(filename):
922 926 data = self.wvfs.readlink(filename)
923 927 else:
924 928 data = self.wvfs.read(filename)
925 929 return self._filter(self._encodefilterpats, filename, data)
926 930
927 931 def wwrite(self, filename, data, flags):
928 932 """write ``data`` into ``filename`` in the working directory
929 933
930 934 This returns length of written (maybe decoded) data.
931 935 """
932 936 data = self._filter(self._decodefilterpats, filename, data)
933 937 if 'l' in flags:
934 938 self.wvfs.symlink(data, filename)
935 939 else:
936 940 self.wvfs.write(filename, data)
937 941 if 'x' in flags:
938 942 self.wvfs.setflags(filename, False, True)
939 943 return len(data)
940 944
941 945 def wwritedata(self, filename, data):
942 946 return self._filter(self._decodefilterpats, filename, data)
943 947
944 948 def currenttransaction(self):
945 949 """return the current transaction or None if non exists"""
946 950 if self._transref:
947 951 tr = self._transref()
948 952 else:
949 953 tr = None
950 954
951 955 if tr and tr.running():
952 956 return tr
953 957 return None
954 958
955 959 def transaction(self, desc, report=None):
956 960 if (self.ui.configbool('devel', 'all-warnings')
957 961 or self.ui.configbool('devel', 'check-locks')):
958 962 l = self._lockref and self._lockref()
959 963 if l is None or not l.held:
960 964 self.ui.develwarn('transaction with no lock')
961 965 tr = self.currenttransaction()
962 966 if tr is not None:
963 967 return tr.nest()
964 968
965 969 # abort here if the journal already exists
966 970 if self.svfs.exists("journal"):
967 971 raise error.RepoError(
968 972 _("abandoned transaction found"),
969 973 hint=_("run 'hg recover' to clean up transaction"))
970 974
971 975 # make journal.dirstate contain in-memory changes at this point
972 976 self.dirstate.write()
973 977
974 978 idbase = "%.40f#%f" % (random.random(), time.time())
975 979 txnid = 'TXN:' + util.sha1(idbase).hexdigest()
976 980 self.hook('pretxnopen', throw=True, txnname=desc, txnid=txnid)
977 981
978 982 self._writejournal(desc)
979 983 renames = [(vfs, x, undoname(x)) for vfs, x in self._journalfiles()]
980 984 if report:
981 985 rp = report
982 986 else:
983 987 rp = self.ui.warn
984 988 vfsmap = {'plain': self.vfs} # root of .hg/
985 989 # we must avoid cyclic reference between repo and transaction.
986 990 reporef = weakref.ref(self)
987 991 def validate(tr):
988 992 """will run pre-closing hooks"""
989 993 pending = lambda: tr.writepending() and self.root or ""
990 994 reporef().hook('pretxnclose', throw=True, pending=pending,
991 995 txnname=desc, **tr.hookargs)
992 996
993 997 tr = transaction.transaction(rp, self.svfs, vfsmap,
994 998 "journal",
995 999 "undo",
996 1000 aftertrans(renames),
997 1001 self.store.createmode,
998 1002 validator=validate)
999 1003
1000 1004 tr.hookargs['txnid'] = txnid
1001 1005 # note: writing the fncache only during finalize means that the file is
1002 1006 # outdated when running hooks. As fncache is used for streaming clone,
1003 1007 # this is not expected to break anything that happens during the hooks.
1004 1008 tr.addfinalize('flush-fncache', self.store.write)
1005 1009 def txnclosehook(tr2):
1006 1010 """To be run if transaction is successful, will schedule a hook run
1007 1011 """
1008 1012 def hook():
1009 1013 reporef().hook('txnclose', throw=False, txnname=desc,
1010 1014 **tr2.hookargs)
1011 1015 reporef()._afterlock(hook)
1012 1016 tr.addfinalize('txnclose-hook', txnclosehook)
1013 1017 def txnaborthook(tr2):
1014 1018 """To be run if transaction is aborted
1015 1019 """
1016 1020 reporef().hook('txnabort', throw=False, txnname=desc,
1017 1021 **tr2.hookargs)
1018 1022 tr.addabort('txnabort-hook', txnaborthook)
1019 1023 self._transref = weakref.ref(tr)
1020 1024 return tr
1021 1025
1022 1026 def _journalfiles(self):
1023 1027 return ((self.svfs, 'journal'),
1024 1028 (self.vfs, 'journal.dirstate'),
1025 1029 (self.vfs, 'journal.branch'),
1026 1030 (self.vfs, 'journal.desc'),
1027 1031 (self.vfs, 'journal.bookmarks'),
1028 1032 (self.svfs, 'journal.phaseroots'))
1029 1033
1030 1034 def undofiles(self):
1031 1035 return [(vfs, undoname(x)) for vfs, x in self._journalfiles()]
1032 1036
1033 1037 def _writejournal(self, desc):
1034 1038 self.vfs.write("journal.dirstate",
1035 1039 self.vfs.tryread("dirstate"))
1036 1040 self.vfs.write("journal.branch",
1037 1041 encoding.fromlocal(self.dirstate.branch()))
1038 1042 self.vfs.write("journal.desc",
1039 1043 "%d\n%s\n" % (len(self), desc))
1040 1044 self.vfs.write("journal.bookmarks",
1041 1045 self.vfs.tryread("bookmarks"))
1042 1046 self.svfs.write("journal.phaseroots",
1043 1047 self.svfs.tryread("phaseroots"))
1044 1048
1045 1049 def recover(self):
1046 1050 lock = self.lock()
1047 1051 try:
1048 1052 if self.svfs.exists("journal"):
1049 1053 self.ui.status(_("rolling back interrupted transaction\n"))
1050 1054 vfsmap = {'': self.svfs,
1051 1055 'plain': self.vfs,}
1052 1056 transaction.rollback(self.svfs, vfsmap, "journal",
1053 1057 self.ui.warn)
1054 1058 self.invalidate()
1055 1059 return True
1056 1060 else:
1057 1061 self.ui.warn(_("no interrupted transaction available\n"))
1058 1062 return False
1059 1063 finally:
1060 1064 lock.release()
1061 1065
1062 1066 def rollback(self, dryrun=False, force=False):
1063 1067 wlock = lock = None
1064 1068 try:
1065 1069 wlock = self.wlock()
1066 1070 lock = self.lock()
1067 1071 if self.svfs.exists("undo"):
1068 1072 return self._rollback(dryrun, force)
1069 1073 else:
1070 1074 self.ui.warn(_("no rollback information available\n"))
1071 1075 return 1
1072 1076 finally:
1073 1077 release(lock, wlock)
1074 1078
1075 1079 @unfilteredmethod # Until we get smarter cache management
1076 1080 def _rollback(self, dryrun, force):
1077 1081 ui = self.ui
1078 1082 try:
1079 1083 args = self.vfs.read('undo.desc').splitlines()
1080 1084 (oldlen, desc, detail) = (int(args[0]), args[1], None)
1081 1085 if len(args) >= 3:
1082 1086 detail = args[2]
1083 1087 oldtip = oldlen - 1
1084 1088
1085 1089 if detail and ui.verbose:
1086 1090 msg = (_('repository tip rolled back to revision %s'
1087 1091 ' (undo %s: %s)\n')
1088 1092 % (oldtip, desc, detail))
1089 1093 else:
1090 1094 msg = (_('repository tip rolled back to revision %s'
1091 1095 ' (undo %s)\n')
1092 1096 % (oldtip, desc))
1093 1097 except IOError:
1094 1098 msg = _('rolling back unknown transaction\n')
1095 1099 desc = None
1096 1100
1097 1101 if not force and self['.'] != self['tip'] and desc == 'commit':
1098 1102 raise util.Abort(
1099 1103 _('rollback of last commit while not checked out '
1100 1104 'may lose data'), hint=_('use -f to force'))
1101 1105
1102 1106 ui.status(msg)
1103 1107 if dryrun:
1104 1108 return 0
1105 1109
1106 1110 parents = self.dirstate.parents()
1107 1111 self.destroying()
1108 1112 vfsmap = {'plain': self.vfs, '': self.svfs}
1109 1113 transaction.rollback(self.svfs, vfsmap, 'undo', ui.warn)
1110 1114 if self.vfs.exists('undo.bookmarks'):
1111 1115 self.vfs.rename('undo.bookmarks', 'bookmarks')
1112 1116 if self.svfs.exists('undo.phaseroots'):
1113 1117 self.svfs.rename('undo.phaseroots', 'phaseroots')
1114 1118 self.invalidate()
1115 1119
1116 1120 parentgone = (parents[0] not in self.changelog.nodemap or
1117 1121 parents[1] not in self.changelog.nodemap)
1118 1122 if parentgone:
1119 1123 self.vfs.rename('undo.dirstate', 'dirstate')
1120 1124 try:
1121 1125 branch = self.vfs.read('undo.branch')
1122 1126 self.dirstate.setbranch(encoding.tolocal(branch))
1123 1127 except IOError:
1124 1128 ui.warn(_('named branch could not be reset: '
1125 1129 'current branch is still \'%s\'\n')
1126 1130 % self.dirstate.branch())
1127 1131
1128 1132 self.dirstate.invalidate()
1129 1133 parents = tuple([p.rev() for p in self.parents()])
1130 1134 if len(parents) > 1:
1131 1135 ui.status(_('working directory now based on '
1132 1136 'revisions %d and %d\n') % parents)
1133 1137 else:
1134 1138 ui.status(_('working directory now based on '
1135 1139 'revision %d\n') % parents)
1136 1140 ms = mergemod.mergestate(self)
1137 1141 ms.reset(self['.'].node())
1138 1142
1139 1143 # TODO: if we know which new heads may result from this rollback, pass
1140 1144 # them to destroy(), which will prevent the branchhead cache from being
1141 1145 # invalidated.
1142 1146 self.destroyed()
1143 1147 return 0
1144 1148
1145 1149 def invalidatecaches(self):
1146 1150
1147 1151 if '_tagscache' in vars(self):
1148 1152 # can't use delattr on proxy
1149 1153 del self.__dict__['_tagscache']
1150 1154
1151 1155 self.unfiltered()._branchcaches.clear()
1152 1156 self.invalidatevolatilesets()
1153 1157
1154 1158 def invalidatevolatilesets(self):
1155 1159 self.filteredrevcache.clear()
1156 1160 obsolete.clearobscaches(self)
1157 1161
1158 1162 def invalidatedirstate(self):
1159 1163 '''Invalidates the dirstate, causing the next call to dirstate
1160 1164 to check if it was modified since the last time it was read,
1161 1165 rereading it if it has.
1162 1166
1163 1167 This is different from dirstate.invalidate() in that it doesn't always
1164 1168 reread the dirstate. Use dirstate.invalidate() if you want to
1165 1169 explicitly read the dirstate again (i.e. restoring it to a previous
1166 1170 known good state).'''
1167 1171 if hasunfilteredcache(self, 'dirstate'):
1168 1172 for k in self.dirstate._filecache:
1169 1173 try:
1170 1174 delattr(self.dirstate, k)
1171 1175 except AttributeError:
1172 1176 pass
1173 1177 delattr(self.unfiltered(), 'dirstate')
1174 1178
1175 1179 def invalidate(self):
1176 1180 unfiltered = self.unfiltered() # all file caches are stored unfiltered
1177 1181 for k in self._filecache:
1178 1182 # dirstate is invalidated separately in invalidatedirstate()
1179 1183 if k == 'dirstate':
1180 1184 continue
1181 1185
1182 1186 try:
1183 1187 delattr(unfiltered, k)
1184 1188 except AttributeError:
1185 1189 pass
1186 1190 self.invalidatecaches()
1187 1191 self.store.invalidatecaches()
1188 1192
1189 1193 def invalidateall(self):
1190 1194 '''Fully invalidates both store and non-store parts, causing the
1191 1195 subsequent operation to reread any outside changes.'''
1192 1196 # extension should hook this to invalidate its caches
1193 1197 self.invalidate()
1194 1198 self.invalidatedirstate()
1195 1199
1196 1200 def _lock(self, vfs, lockname, wait, releasefn, acquirefn, desc):
1197 1201 try:
1198 1202 l = lockmod.lock(vfs, lockname, 0, releasefn, desc=desc)
1199 1203 except error.LockHeld as inst:
1200 1204 if not wait:
1201 1205 raise
1202 1206 self.ui.warn(_("waiting for lock on %s held by %r\n") %
1203 1207 (desc, inst.locker))
1204 1208 # default to 600 seconds timeout
1205 1209 l = lockmod.lock(vfs, lockname,
1206 1210 int(self.ui.config("ui", "timeout", "600")),
1207 1211 releasefn, desc=desc)
1208 1212 self.ui.warn(_("got lock after %s seconds\n") % l.delay)
1209 1213 if acquirefn:
1210 1214 acquirefn()
1211 1215 return l
1212 1216
1213 1217 def _afterlock(self, callback):
1214 1218 """add a callback to be run when the repository is fully unlocked
1215 1219
1216 1220 The callback will be executed when the outermost lock is released
1217 1221 (with wlock being higher level than 'lock')."""
1218 1222 for ref in (self._wlockref, self._lockref):
1219 1223 l = ref and ref()
1220 1224 if l and l.held:
1221 1225 l.postrelease.append(callback)
1222 1226 break
1223 1227 else: # no lock has been found.
1224 1228 callback()
1225 1229
1226 1230 def lock(self, wait=True):
1227 1231 '''Lock the repository store (.hg/store) and return a weak reference
1228 1232 to the lock. Use this before modifying the store (e.g. committing or
1229 1233 stripping). If you are opening a transaction, get a lock as well.
1230 1234
1231 1235 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1232 1236 'wlock' first to avoid a dead-lock hazard.'''
1233 1237 l = self._lockref and self._lockref()
1234 1238 if l is not None and l.held:
1235 1239 l.lock()
1236 1240 return l
1237 1241
1238 1242 def unlock():
1239 1243 for k, ce in self._filecache.items():
1240 1244 if k == 'dirstate' or k not in self.__dict__:
1241 1245 continue
1242 1246 ce.refresh()
1243 1247
1244 1248 l = self._lock(self.svfs, "lock", wait, unlock,
1245 1249 self.invalidate, _('repository %s') % self.origroot)
1246 1250 self._lockref = weakref.ref(l)
1247 1251 return l
1248 1252
1249 1253 def wlock(self, wait=True):
1250 1254 '''Lock the non-store parts of the repository (everything under
1251 1255 .hg except .hg/store) and return a weak reference to the lock.
1252 1256
1253 1257 Use this before modifying files in .hg.
1254 1258
1255 1259 If both 'lock' and 'wlock' must be acquired, ensure you always acquire
1256 1260 'wlock' first to avoid a dead-lock hazard.'''
1257 1261 l = self._wlockref and self._wlockref()
1258 1262 if l is not None and l.held:
1259 1263 l.lock()
1260 1264 return l
1261 1265
1262 1266 # We do not need to check for non-waiting lock acquisition. Such
1263 1267 # acquisition would not cause a dead-lock as it would just fail.
1264 1268 if wait and (self.ui.configbool('devel', 'all-warnings')
1265 1269 or self.ui.configbool('devel', 'check-locks')):
1266 1270 l = self._lockref and self._lockref()
1267 1271 if l is not None and l.held:
1268 1272 self.ui.develwarn('"wlock" acquired after "lock"')
1269 1273
1270 1274 def unlock():
1271 1275 if self.dirstate.pendingparentchange():
1272 1276 self.dirstate.invalidate()
1273 1277 else:
1274 1278 self.dirstate.write()
1275 1279
1276 1280 self._filecache['dirstate'].refresh()
1277 1281
1278 1282 l = self._lock(self.vfs, "wlock", wait, unlock,
1279 1283 self.invalidatedirstate, _('working directory of %s') %
1280 1284 self.origroot)
1281 1285 self._wlockref = weakref.ref(l)
1282 1286 return l
1283 1287
1284 1288 def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
1285 1289 """
1286 1290 commit an individual file as part of a larger transaction
1287 1291 """
1288 1292
1289 1293 fname = fctx.path()
1290 1294 fparent1 = manifest1.get(fname, nullid)
1291 1295 fparent2 = manifest2.get(fname, nullid)
1292 1296 if isinstance(fctx, context.filectx):
1293 1297 node = fctx.filenode()
1294 1298 if node in [fparent1, fparent2]:
1295 1299 self.ui.debug('reusing %s filelog entry\n' % fname)
1296 1300 return node
1297 1301
1298 1302 flog = self.file(fname)
1299 1303 meta = {}
1300 1304 copy = fctx.renamed()
1301 1305 if copy and copy[0] != fname:
1302 1306 # Mark the new revision of this file as a copy of another
1303 1307 # file. This copy data will effectively act as a parent
1304 1308 # of this new revision. If this is a merge, the first
1305 1309 # parent will be the nullid (meaning "look up the copy data")
1306 1310 # and the second one will be the other parent. For example:
1307 1311 #
1308 1312 # 0 --- 1 --- 3 rev1 changes file foo
1309 1313 # \ / rev2 renames foo to bar and changes it
1310 1314 # \- 2 -/ rev3 should have bar with all changes and
1311 1315 # should record that bar descends from
1312 1316 # bar in rev2 and foo in rev1
1313 1317 #
1314 1318 # this allows this merge to succeed:
1315 1319 #
1316 1320 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
1317 1321 # \ / merging rev3 and rev4 should use bar@rev2
1318 1322 # \- 2 --- 4 as the merge base
1319 1323 #
1320 1324
1321 1325 cfname = copy[0]
1322 1326 crev = manifest1.get(cfname)
1323 1327 newfparent = fparent2
1324 1328
1325 1329 if manifest2: # branch merge
1326 1330 if fparent2 == nullid or crev is None: # copied on remote side
1327 1331 if cfname in manifest2:
1328 1332 crev = manifest2[cfname]
1329 1333 newfparent = fparent1
1330 1334
1331 1335 # Here, we used to search backwards through history to try to find
1332 1336 # where the file copy came from if the source of a copy was not in
1333 1337 # the parent directory. However, this doesn't actually make sense to
1334 1338 # do (what does a copy from something not in your working copy even
1335 1339 # mean?) and it causes bugs (eg, issue4476). Instead, we will warn
1336 1340 # the user that copy information was dropped, so if they didn't
1337 1341 # expect this outcome it can be fixed, but this is the correct
1338 1342 # behavior in this circumstance.
1339 1343
1340 1344 if crev:
1341 1345 self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
1342 1346 meta["copy"] = cfname
1343 1347 meta["copyrev"] = hex(crev)
1344 1348 fparent1, fparent2 = nullid, newfparent
1345 1349 else:
1346 1350 self.ui.warn(_("warning: can't find ancestor for '%s' "
1347 1351 "copied from '%s'!\n") % (fname, cfname))
1348 1352
1349 1353 elif fparent1 == nullid:
1350 1354 fparent1, fparent2 = fparent2, nullid
1351 1355 elif fparent2 != nullid:
1352 1356 # is one parent an ancestor of the other?
1353 1357 fparentancestors = flog.commonancestorsheads(fparent1, fparent2)
1354 1358 if fparent1 in fparentancestors:
1355 1359 fparent1, fparent2 = fparent2, nullid
1356 1360 elif fparent2 in fparentancestors:
1357 1361 fparent2 = nullid
1358 1362
1359 1363 # is the file changed?
1360 1364 text = fctx.data()
1361 1365 if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
1362 1366 changelist.append(fname)
1363 1367 return flog.add(text, meta, tr, linkrev, fparent1, fparent2)
1364 1368 # are just the flags changed during merge?
1365 1369 elif fname in manifest1 and manifest1.flags(fname) != fctx.flags():
1366 1370 changelist.append(fname)
1367 1371
1368 1372 return fparent1
1369 1373
1370 1374 @unfilteredmethod
1371 1375 def commit(self, text="", user=None, date=None, match=None, force=False,
1372 1376 editor=False, extra={}):
1373 1377 """Add a new revision to current repository.
1374 1378
1375 1379 Revision information is gathered from the working directory,
1376 1380 match can be used to filter the committed files. If editor is
1377 1381 supplied, it is called to get a commit message.
1378 1382 """
1379 1383
1380 1384 def fail(f, msg):
1381 1385 raise util.Abort('%s: %s' % (f, msg))
1382 1386
1383 1387 if not match:
1384 1388 match = matchmod.always(self.root, '')
1385 1389
1386 1390 if not force:
1387 1391 vdirs = []
1388 1392 match.explicitdir = vdirs.append
1389 1393 match.bad = fail
1390 1394
1391 1395 wlock = self.wlock()
1392 1396 try:
1393 1397 wctx = self[None]
1394 1398 merge = len(wctx.parents()) > 1
1395 1399
1396 1400 if not force and merge and match.ispartial():
1397 1401 raise util.Abort(_('cannot partially commit a merge '
1398 1402 '(do not specify files or patterns)'))
1399 1403
1400 1404 status = self.status(match=match, clean=force)
1401 1405 if force:
1402 1406 status.modified.extend(status.clean) # mq may commit clean files
1403 1407
1404 1408 # check subrepos
1405 1409 subs = []
1406 1410 commitsubs = set()
1407 1411 newstate = wctx.substate.copy()
1408 1412 # only manage subrepos and .hgsubstate if .hgsub is present
1409 1413 if '.hgsub' in wctx:
1410 1414 # we'll decide whether to track this ourselves, thanks
1411 1415 for c in status.modified, status.added, status.removed:
1412 1416 if '.hgsubstate' in c:
1413 1417 c.remove('.hgsubstate')
1414 1418
1415 1419 # compare current state to last committed state
1416 1420 # build new substate based on last committed state
1417 1421 oldstate = wctx.p1().substate
1418 1422 for s in sorted(newstate.keys()):
1419 1423 if not match(s):
1420 1424 # ignore working copy, use old state if present
1421 1425 if s in oldstate:
1422 1426 newstate[s] = oldstate[s]
1423 1427 continue
1424 1428 if not force:
1425 1429 raise util.Abort(
1426 1430 _("commit with new subrepo %s excluded") % s)
1427 1431 dirtyreason = wctx.sub(s).dirtyreason(True)
1428 1432 if dirtyreason:
1429 1433 if not self.ui.configbool('ui', 'commitsubrepos'):
1430 1434 raise util.Abort(dirtyreason,
1431 1435 hint=_("use --subrepos for recursive commit"))
1432 1436 subs.append(s)
1433 1437 commitsubs.add(s)
1434 1438 else:
1435 1439 bs = wctx.sub(s).basestate()
1436 1440 newstate[s] = (newstate[s][0], bs, newstate[s][2])
1437 1441 if oldstate.get(s, (None, None, None))[1] != bs:
1438 1442 subs.append(s)
1439 1443
1440 1444 # check for removed subrepos
1441 1445 for p in wctx.parents():
1442 1446 r = [s for s in p.substate if s not in newstate]
1443 1447 subs += [s for s in r if match(s)]
1444 1448 if subs:
1445 1449 if (not match('.hgsub') and
1446 1450 '.hgsub' in (wctx.modified() + wctx.added())):
1447 1451 raise util.Abort(
1448 1452 _("can't commit subrepos without .hgsub"))
1449 1453 status.modified.insert(0, '.hgsubstate')
1450 1454
1451 1455 elif '.hgsub' in status.removed:
1452 1456 # clean up .hgsubstate when .hgsub is removed
1453 1457 if ('.hgsubstate' in wctx and
1454 1458 '.hgsubstate' not in (status.modified + status.added +
1455 1459 status.removed)):
1456 1460 status.removed.insert(0, '.hgsubstate')
1457 1461
1458 1462 # make sure all explicit patterns are matched
1459 1463 if not force and (match.isexact() or match.prefix()):
1460 1464 matched = set(status.modified + status.added + status.removed)
1461 1465
1462 1466 for f in match.files():
1463 1467 f = self.dirstate.normalize(f)
1464 1468 if f == '.' or f in matched or f in wctx.substate:
1465 1469 continue
1466 1470 if f in status.deleted:
1467 1471 fail(f, _('file not found!'))
1468 1472 if f in vdirs: # visited directory
1469 1473 d = f + '/'
1470 1474 for mf in matched:
1471 1475 if mf.startswith(d):
1472 1476 break
1473 1477 else:
1474 1478 fail(f, _("no match under directory!"))
1475 1479 elif f not in self.dirstate:
1476 1480 fail(f, _("file not tracked!"))
1477 1481
1478 1482 cctx = context.workingcommitctx(self, status,
1479 1483 text, user, date, extra)
1480 1484
1481 1485 # internal config: ui.allowemptycommit
1482 1486 allowemptycommit = (wctx.branch() != wctx.p1().branch()
1483 1487 or extra.get('close') or merge or cctx.files()
1484 1488 or self.ui.configbool('ui', 'allowemptycommit'))
1485 1489 if not allowemptycommit:
1486 1490 return None
1487 1491
1488 1492 if merge and cctx.deleted():
1489 1493 raise util.Abort(_("cannot commit merge with missing files"))
1490 1494
1491 1495 ms = mergemod.mergestate(self)
1492 1496 for f in status.modified:
1493 1497 if f in ms and ms[f] == 'u':
1494 1498 raise util.Abort(_('unresolved merge conflicts '
1495 1499 '(see "hg help resolve")'))
1496 1500
1497 1501 if editor:
1498 1502 cctx._text = editor(self, cctx, subs)
1499 1503 edited = (text != cctx._text)
1500 1504
1501 1505 # Save commit message in case this transaction gets rolled back
1502 1506 # (e.g. by a pretxncommit hook). Leave the content alone on
1503 1507 # the assumption that the user will use the same editor again.
1504 1508 msgfn = self.savecommitmessage(cctx._text)
1505 1509
1506 1510 # commit subs and write new state
1507 1511 if subs:
1508 1512 for s in sorted(commitsubs):
1509 1513 sub = wctx.sub(s)
1510 1514 self.ui.status(_('committing subrepository %s\n') %
1511 1515 subrepo.subrelpath(sub))
1512 1516 sr = sub.commit(cctx._text, user, date)
1513 1517 newstate[s] = (newstate[s][0], sr)
1514 1518 subrepo.writestate(self, newstate)
1515 1519
1516 1520 p1, p2 = self.dirstate.parents()
1517 1521 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1518 1522 try:
1519 1523 self.hook("precommit", throw=True, parent1=hookp1,
1520 1524 parent2=hookp2)
1521 1525 ret = self.commitctx(cctx, True)
1522 1526 except: # re-raises
1523 1527 if edited:
1524 1528 self.ui.write(
1525 1529 _('note: commit message saved in %s\n') % msgfn)
1526 1530 raise
1527 1531
1528 1532 # update bookmarks, dirstate and mergestate
1529 1533 bookmarks.update(self, [p1, p2], ret)
1530 1534 cctx.markcommitted(ret)
1531 1535 ms.reset()
1532 1536 finally:
1533 1537 wlock.release()
1534 1538
1535 1539 def commithook(node=hex(ret), parent1=hookp1, parent2=hookp2):
1536 1540 # hack for commands that use a temporary commit (e.g. histedit):
1537 1541 # the temporary commit may already have been stripped before the hook runs
1538 1542 if self.changelog.hasnode(ret):
1539 1543 self.hook("commit", node=node, parent1=parent1,
1540 1544 parent2=parent2)
1541 1545 self._afterlock(commithook)
1542 1546 return ret
1543 1547
1544 1548 @unfilteredmethod
1545 1549 def commitctx(self, ctx, error=False):
1546 1550 """Add a new revision to current repository.
1547 1551 Revision information is passed via the context argument.
1548 1552 """
1549 1553
1550 1554 tr = None
1551 1555 p1, p2 = ctx.p1(), ctx.p2()
1552 1556 user = ctx.user()
1553 1557
1554 1558 lock = self.lock()
1555 1559 try:
1556 1560 tr = self.transaction("commit")
1557 1561 trp = weakref.proxy(tr)
1558 1562
1559 1563 if ctx.files():
1560 1564 m1 = p1.manifest()
1561 1565 m2 = p2.manifest()
1562 1566 m = m1.copy()
1563 1567
1564 1568 # check in files
1565 1569 added = []
1566 1570 changed = []
1567 1571 removed = list(ctx.removed())
1568 1572 linkrev = len(self)
1569 1573 self.ui.note(_("committing files:\n"))
1570 1574 for f in sorted(ctx.modified() + ctx.added()):
1571 1575 self.ui.note(f + "\n")
1572 1576 try:
1573 1577 fctx = ctx[f]
1574 1578 if fctx is None:
1575 1579 removed.append(f)
1576 1580 else:
1577 1581 added.append(f)
1578 1582 m[f] = self._filecommit(fctx, m1, m2, linkrev,
1579 1583 trp, changed)
1580 1584 m.setflag(f, fctx.flags())
1581 1585 except OSError as inst:
1582 1586 self.ui.warn(_("trouble committing %s!\n") % f)
1583 1587 raise
1584 1588 except IOError as inst:
1585 1589 errcode = getattr(inst, 'errno', errno.ENOENT)
1586 1590 if error or errcode and errcode != errno.ENOENT:
1587 1591 self.ui.warn(_("trouble committing %s!\n") % f)
1588 1592 raise
1589 1593
1590 1594 # update manifest
1591 1595 self.ui.note(_("committing manifest\n"))
1592 1596 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1593 1597 drop = [f for f in removed if f in m]
1594 1598 for f in drop:
1595 1599 del m[f]
1596 1600 mn = self.manifest.add(m, trp, linkrev,
1597 1601 p1.manifestnode(), p2.manifestnode(),
1598 1602 added, drop)
1599 1603 files = changed + removed
1600 1604 else:
1601 1605 mn = p1.manifestnode()
1602 1606 files = []
1603 1607
1604 1608 # update changelog
1605 1609 self.ui.note(_("committing changelog\n"))
1606 1610 self.changelog.delayupdate(tr)
1607 1611 n = self.changelog.add(mn, files, ctx.description(),
1608 1612 trp, p1.node(), p2.node(),
1609 1613 user, ctx.date(), ctx.extra().copy())
1610 1614 p = lambda: tr.writepending() and self.root or ""
1611 1615 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1612 1616 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1613 1617 parent2=xp2, pending=p)
1614 1618 # set the new commit in its proper phase
1615 1619 targetphase = subrepo.newcommitphase(self.ui, ctx)
1616 1620 if targetphase:
1617 1621 # retracting the boundary does not alter parent changesets.
1618 1622 # if a parent has a higher phase, the resulting phase will
1619 1623 # be compliant anyway
1620 1624 #
1621 1625 # if minimal phase was 0 we don't need to retract anything
1622 1626 phases.retractboundary(self, tr, targetphase, [n])
1623 1627 tr.close()
1624 1628 branchmap.updatecache(self.filtered('served'))
1625 1629 return n
1626 1630 finally:
1627 1631 if tr:
1628 1632 tr.release()
1629 1633 lock.release()
1630 1634
1631 1635 @unfilteredmethod
1632 1636 def destroying(self):
1633 1637 '''Inform the repository that nodes are about to be destroyed.
1634 1638 Intended for use by strip and rollback, so there's a common
1635 1639 place for anything that has to be done before destroying history.
1636 1640
1637 1641 This is mostly useful for saving state that is in memory and waiting
1638 1642 to be flushed when the current lock is released. Because a call to
1639 1643 destroyed is imminent, the repo will be invalidated, causing those
1640 1644 changes either to stay in memory (waiting for the next unlock) or to
1641 1645 vanish completely.
1642 1646 '''
1643 1647 # When using the same lock to commit and strip, the phasecache is left
1644 1648 # dirty after committing. Then when we strip, the repo is invalidated,
1645 1649 # causing those changes to disappear.
1646 1650 if '_phasecache' in vars(self):
1647 1651 self._phasecache.write()
1648 1652
1649 1653 @unfilteredmethod
1650 1654 def destroyed(self):
1651 1655 '''Inform the repository that nodes have been destroyed.
1652 1656 Intended for use by strip and rollback, so there's a common
1653 1657 place for anything that has to be done after destroying history.
1654 1658 '''
1655 1659 # When one tries to:
1656 1660 # 1) destroy nodes thus calling this method (e.g. strip)
1657 1661 # 2) use phasecache somewhere (e.g. commit)
1658 1662 #
1659 1663 # then 2) will fail because the phasecache contains nodes that were
1660 1664 # removed. We can either remove phasecache from the filecache,
1661 1665 # causing it to reload next time it is accessed, or simply filter
1662 1666 # the removed nodes now and write the updated cache.
1663 1667 self._phasecache.filterunknown(self)
1664 1668 self._phasecache.write()
1665 1669
1666 1670 # update the 'served' branch cache to help read-only server processes.
1667 1671 # Thanks to branchcache collaboration, this is done from the nearest
1668 1672 # filtered subset and is expected to be fast.
1669 1673 branchmap.updatecache(self.filtered('served'))
1670 1674
1671 1675 # Ensure the persistent tag cache is updated. Doing it now
1672 1676 # means that the tag cache only has to worry about destroyed
1673 1677 # heads immediately after a strip/rollback. That in turn
1674 1678 # guarantees that "cachetip == currenttip" (comparing both rev
1675 1679 # and node) always means no nodes have been added or destroyed.
1676 1680
1677 1681 # XXX this is suboptimal when qrefresh'ing: we strip the current
1678 1682 # head, refresh the tag cache, then immediately add a new head.
1679 1683 # But I think doing it this way is necessary for the "instant
1680 1684 # tag cache retrieval" case to work.
1681 1685 self.invalidate()
1682 1686
1683 1687 def walk(self, match, node=None):
1684 1688 '''
1685 1689 walk recursively through the directory tree or a given
1686 1690 changeset, finding all files matched by the match
1687 1691 function
1688 1692 '''
1689 1693 return self[node].walk(match)
1690 1694
1691 1695 def status(self, node1='.', node2=None, match=None,
1692 1696 ignored=False, clean=False, unknown=False,
1693 1697 listsubrepos=False):
1694 1698 '''a convenience method that calls node1.status(node2)'''
1695 1699 return self[node1].status(node2, match, ignored, clean, unknown,
1696 1700 listsubrepos)
1697 1701
1698 1702 def heads(self, start=None):
1699 1703 heads = self.changelog.heads(start)
1700 1704 # sort the output in rev descending order
1701 1705 return sorted(heads, key=self.changelog.rev, reverse=True)
1702 1706
1703 1707 def branchheads(self, branch=None, start=None, closed=False):
1704 1708 '''return a (possibly filtered) list of heads for the given branch
1705 1709
1706 1710 Heads are returned in topological order, from newest to oldest.
1707 1711 If branch is None, use the dirstate branch.
1708 1712 If start is not None, return only heads reachable from start.
1709 1713 If closed is True, return heads that are marked as closed as well.
1710 1714 '''
1711 1715 if branch is None:
1712 1716 branch = self[None].branch()
1713 1717 branches = self.branchmap()
1714 1718 if branch not in branches:
1715 1719 return []
1716 1720 # the cache returns heads ordered lowest to highest
1717 1721 bheads = list(reversed(branches.branchheads(branch, closed=closed)))
1718 1722 if start is not None:
1719 1723 # filter out the heads that cannot be reached from startrev
1720 1724 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1721 1725 bheads = [h for h in bheads if h in fbheads]
1722 1726 return bheads
1723 1727
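# Usage sketch (comment only, values assumed): heads of the current dirstate
# branch, including closed ones, newest first:
#
#   repo.branchheads(closed=True)
#
# or only the heads of 'stable' reachable from a given start node:
#
#   repo.branchheads('stable', start=somenode)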
1724 1728 def branches(self, nodes):
1725 1729 if not nodes:
1726 1730 nodes = [self.changelog.tip()]
1727 1731 b = []
1728 1732 for n in nodes:
1729 1733 t = n
1730 1734 while True:
1731 1735 p = self.changelog.parents(n)
1732 1736 if p[1] != nullid or p[0] == nullid:
1733 1737 b.append((t, n, p[0], p[1]))
1734 1738 break
1735 1739 n = p[0]
1736 1740 return b
1737 1741
1738 1742 def between(self, pairs):
1739 1743 r = []
1740 1744
1741 1745 for top, bottom in pairs:
1742 1746 n, l, i = top, [], 0
1743 1747 f = 1
1744 1748
1745 1749 while n != bottom and n != nullid:
1746 1750 p = self.changelog.parents(n)[0]
1747 1751 if i == f:
1748 1752 l.append(n)
1749 1753 f = f * 2
1750 1754 n = p
1751 1755 i += 1
1752 1756
1753 1757 r.append(l)
1754 1758
1755 1759 return r
1756 1760
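# Note (added commentary): for each (top, bottom) pair the loop above walks
# first parents from top towards bottom and records the ancestors at
# exponentially growing distances 1, 2, 4, 8, ... from top, which bounds the
# size of each returned list to O(log chain length).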
1757 1761 def checkpush(self, pushop):
1758 1762 """Extensions can override this function if additional checks have
1759 1763 to be performed before pushing, or call it if they override push
1760 1764 command.
1761 1765 """
1762 1766 pass
1763 1767
1764 1768 @unfilteredpropertycache
1765 1769 def prepushoutgoinghooks(self):
1766 1770 """Return util.hooks consists of "(repo, remote, outgoing)"
1767 1771 functions, which are called before pushing changesets.
1768 1772 """
1769 1773 return util.hooks()
1770 1774
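# Sketch (comment only, assumed API): an extension could register such a
# check with something like
#
#   def mycheck(repo, remote, outgoing):
#       pass  # raise util.Abort(...) here to block the push
#   repo.prepushoutgoinghooks.add('myextension', mycheck)
#
# util.hooks is assumed here to expose an add(source, hook) method.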
1771 1775 def stream_in(self, remote, remotereqs):
1772 1776 # Save remote branchmap. We will use it later
1773 1777 # to speed up branchcache creation
1774 1778 rbranchmap = None
1775 1779 if remote.capable("branchmap"):
1776 1780 rbranchmap = remote.branchmap()
1777 1781
1778 1782 fp = remote.stream_out()
1779 1783 l = fp.readline()
1780 1784 try:
1781 1785 resp = int(l)
1782 1786 except ValueError:
1783 1787 raise error.ResponseError(
1784 1788 _('unexpected response from remote server:'), l)
1785 1789 if resp == 1:
1786 1790 raise util.Abort(_('operation forbidden by server'))
1787 1791 elif resp == 2:
1788 1792 raise util.Abort(_('locking the remote repository failed'))
1789 1793 elif resp != 0:
1790 1794 raise util.Abort(_('the server sent an unknown error code'))
1791 1795
1792 1796 self.applystreamclone(remotereqs, rbranchmap, fp)
1793 1797 return len(self.heads()) + 1
1794 1798
1795 1799 def applystreamclone(self, remotereqs, remotebranchmap, fp):
1796 1800 """Apply stream clone data to this repository.
1797 1801
1798 1802 "remotereqs" is a set of requirements to handle the incoming data.
1799 1803 "remotebranchmap" is the result of a branchmap lookup on the remote. It
1800 1804 can be None.
1801 1805 "fp" is a file object containing the raw stream data, suitable for
1802 1806 feeding into exchange.consumestreamclone.
1803 1807 """
1804 1808 lock = self.lock()
1805 1809 try:
1806 1810 exchange.consumestreamclone(self, fp)
1807 1811
1808 1812 # new requirements = old non-format requirements +
1809 1813 # new format-related requirements taken from the
1810 1814 # streamed-in (remote) repository
1811 1815 self.requirements = remotereqs | (
1812 1816 self.requirements - self.supportedformats)
1813 1817 self._applyopenerreqs()
1814 1818 self._writerequirements()
1815 1819
1816 1820 if remotebranchmap:
1817 1821 rbheads = []
1818 1822 closed = []
1819 1823 for bheads in remotebranchmap.itervalues():
1820 1824 rbheads.extend(bheads)
1821 1825 for h in bheads:
1822 1826 r = self.changelog.rev(h)
1823 1827 b, c = self.changelog.branchinfo(r)
1824 1828 if c:
1825 1829 closed.append(h)
1826 1830
1827 1831 if rbheads:
1828 1832 rtiprev = max((int(self.changelog.rev(node))
1829 1833 for node in rbheads))
1830 1834 cache = branchmap.branchcache(remotebranchmap,
1831 1835 self[rtiprev].node(),
1832 1836 rtiprev,
1833 1837 closednodes=closed)
1834 1838 # Try to stick it as low as possible
1835 1839 # filters above 'served' are unlikely to be fetched from a clone
1836 1840 for candidate in ('base', 'immutable', 'served'):
1837 1841 rview = self.filtered(candidate)
1838 1842 if cache.validfor(rview):
1839 1843 self._branchcaches[candidate] = cache
1840 1844 cache.write(rview)
1841 1845 break
1842 1846 self.invalidate()
1843 1847 finally:
1844 1848 lock.release()
1845 1849
1846 1850 def clone(self, remote, heads=[], stream=None):
1847 1851 '''clone remote repository.
1848 1852
1849 1853 keyword arguments:
1850 1854 heads: list of revs to clone (forces use of pull)
1851 1855 stream: use streaming clone if possible'''
1852 1856
1853 1857 # now, all clients that can request uncompressed clones can
1854 1858 # read repo formats supported by all servers that can serve
1855 1859 # them.
1856 1860
1857 1861 # if revlog format changes, client will have to check version
1858 1862 # and format flags on "stream" capability, and use
1859 1863 # uncompressed only if compatible.
1860 1864
1861 1865 if stream is None:
1862 1866 # if the server explicitly prefers to stream (for fast LANs)
1863 1867 stream = remote.capable('stream-preferred')
1864 1868
1865 1869 if stream and not heads:
1866 1870 # 'stream' means remote revlog format is revlogv1 only
1867 1871 if remote.capable('stream'):
1868 1872 self.stream_in(remote, set(('revlogv1',)))
1869 1873 else:
1870 1874 # otherwise, 'streamreqs' contains the remote revlog format
1871 1875 streamreqs = remote.capable('streamreqs')
1872 1876 if streamreqs:
1873 1877 streamreqs = set(streamreqs.split(','))
1874 1878 # if we support it, stream in and adjust our requirements
1875 1879 if not streamreqs - self.supportedformats:
1876 1880 self.stream_in(remote, streamreqs)
1877 1881
1878 1882 # internal config: ui.quietbookmarkmove
1879 1883 quiet = self.ui.backupconfig('ui', 'quietbookmarkmove')
1880 1884 try:
1881 1885 self.ui.setconfig('ui', 'quietbookmarkmove', True, 'clone')
1882 1886 ret = exchange.pull(self, remote, heads).cgresult
1883 1887 finally:
1884 1888 self.ui.restoreconfig(quiet)
1885 1889 return ret
1886 1890
1887 1891 def pushkey(self, namespace, key, old, new):
1888 1892 try:
1889 1893 tr = self.currenttransaction()
1890 1894 hookargs = {}
1891 1895 if tr is not None:
1892 1896 hookargs.update(tr.hookargs)
1893 1897 pending = lambda: tr.writepending() and self.root or ""
1894 1898 hookargs['pending'] = pending
1895 1899 hookargs['namespace'] = namespace
1896 1900 hookargs['key'] = key
1897 1901 hookargs['old'] = old
1898 1902 hookargs['new'] = new
1899 1903 self.hook('prepushkey', throw=True, **hookargs)
1900 1904 except error.HookAbort as exc:
1901 1905 self.ui.write_err(_("pushkey-abort: %s\n") % exc)
1902 1906 if exc.hint:
1903 1907 self.ui.write_err(_("(%s)\n") % exc.hint)
1904 1908 return False
1905 1909 self.ui.debug('pushing key for "%s:%s"\n' % (namespace, key))
1906 1910 ret = pushkey.push(self, namespace, key, old, new)
1907 1911 def runhook():
1908 1912 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1909 1913 ret=ret)
1910 1914 self._afterlock(runhook)
1911 1915 return ret
1912 1916
1913 1917 def listkeys(self, namespace):
1914 1918 self.hook('prelistkeys', throw=True, namespace=namespace)
1915 1919 self.ui.debug('listing keys for "%s"\n' % namespace)
1916 1920 values = pushkey.list(self, namespace)
1917 1921 self.hook('listkeys', namespace=namespace, values=values)
1918 1922 return values
1919 1923
1920 1924 def debugwireargs(self, one, two, three=None, four=None, five=None):
1921 1925 '''used to test argument passing over the wire'''
1922 1926 return "%s %s %s %s %s" % (one, two, three, four, five)
1923 1927
1924 1928 def savecommitmessage(self, text):
1925 1929 fp = self.vfs('last-message.txt', 'wb')
1926 1930 try:
1927 1931 fp.write(text)
1928 1932 finally:
1929 1933 fp.close()
1930 1934 return self.pathto(fp.name[len(self.root) + 1:])
1931 1935
1932 1936 # used to avoid circular references so destructors work
1933 1937 def aftertrans(files):
1934 1938 renamefiles = [tuple(t) for t in files]
1935 1939 def a():
1936 1940 for vfs, src, dest in renamefiles:
1937 1941 try:
1938 1942 vfs.rename(src, dest)
1939 1943 except OSError: # journal file does not yet exist
1940 1944 pass
1941 1945 return a
1942 1946
1943 1947 def undoname(fn):
1944 1948 base, name = os.path.split(fn)
1945 1949 assert name.startswith('journal')
1946 1950 return os.path.join(base, name.replace('journal', 'undo', 1))
1947 1951
1948 1952 def instance(ui, path, create):
1949 1953 return localrepository(ui, util.urllocalpath(path), create)
1950 1954
1951 1955 def islocal(path):
1952 1956 return True
@@ -1,1629 +1,1651 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 import collections
16 16 from node import bin, hex, nullid, nullrev
17 17 from i18n import _
18 18 import ancestor, mdiff, parsers, error, util, templatefilters
19 19 import struct, zlib, errno
20 20
21 21 _pack = struct.pack
22 22 _unpack = struct.unpack
23 23 _compress = zlib.compress
24 24 _decompress = zlib.decompress
25 25 _sha = util.sha1
26 26
27 27 # revlog header flags
28 28 REVLOGV0 = 0
29 29 REVLOGNG = 1
30 30 REVLOGNGINLINEDATA = (1 << 16)
31 31 REVLOGGENERALDELTA = (1 << 17)
32 32 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
33 33 REVLOG_DEFAULT_FORMAT = REVLOGNG
34 34 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
35 35 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
36 36
37 37 # revlog index flags
38 38 REVIDX_ISCENSORED = (1 << 15) # revision has censor metadata, must be verified
39 39 REVIDX_DEFAULT_FLAGS = 0
40 40 REVIDX_KNOWN_FLAGS = REVIDX_ISCENSORED
41 41
42 42 # max size of revlog with inline data
43 43 _maxinline = 131072
44 44 _chunksize = 1048576
45 45
46 46 RevlogError = error.RevlogError
47 47 LookupError = error.LookupError
48 48 CensoredNodeError = error.CensoredNodeError
49 49
50 50 def getoffset(q):
51 51 return int(q >> 16)
52 52
53 53 def gettype(q):
54 54 return int(q & 0xFFFF)
55 55
56 56 def offset_type(offset, type):
57 57 return long(long(offset) << 16 | type)
58 58
59 59 _nullhash = _sha(nullid)
60 60
61 61 def hash(text, p1, p2):
62 62 """generate a hash from the given text and its parent hashes
63 63
64 64 This hash combines both the current file contents and its history
65 65 in a manner that makes it easy to distinguish nodes with the same
66 66 content in the revision graph.
67 67 """
68 68 # As of now, if one of the parent nodes is null, p2 is null
69 69 if p2 == nullid:
70 70 # deep copy of a hash is faster than creating one
71 71 s = _nullhash.copy()
72 72 s.update(p1)
73 73 else:
74 74 # none of the parent nodes are nullid
75 75 l = [p1, p2]
76 76 l.sort()
77 77 s = _sha(l[0])
78 78 s.update(l[1])
79 79 s.update(text)
80 80 return s.digest()
81 81
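# Illustrative sketch (not part of the original file): for a revision whose
# second parent is null, the shortcut above is equivalent to hashing the
# sorted parents followed by the text, e.g.
#
#   import hashlib
#   def examplehash(text, p1):          # hypothetical helper
#       return hashlib.sha1(nullid + p1 + text).digest()
#
# which should match hash(text, p1, nullid), since nullid (20 zero bytes)
# always sorts first.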
82 82 def decompress(bin):
83 83 """ decompress the given input """
84 84 if not bin:
85 85 return bin
86 86 t = bin[0]
87 87 if t == '\0':
88 88 return bin
89 89 if t == 'x':
90 90 try:
91 91 return _decompress(bin)
92 92 except zlib.error as e:
93 93 raise RevlogError(_("revlog decompress error: %s") % str(e))
94 94 if t == 'u':
95 95 return bin[1:]
96 96 raise RevlogError(_("unknown compression type %r") % t)
97 97
98 98 # index v0:
99 99 # 4 bytes: offset
100 100 # 4 bytes: compressed length
101 101 # 4 bytes: base rev
102 102 # 4 bytes: link rev
103 103 # 20 bytes: parent 1 nodeid
104 104 # 20 bytes: parent 2 nodeid
105 105 # 20 bytes: nodeid
106 106 indexformatv0 = ">4l20s20s20s"
107 107
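# Sketch (not in the original source): the v0 record size implied by the
# format string above can be checked with struct, e.g.
#
#   import struct
#   struct.calcsize(">4l20s20s20s")   # -> 76 bytes per index entry
#
# i.e. four 4-byte big-endian integers followed by three 20-byte nodeids.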
108 108 class revlogoldio(object):
109 109 def __init__(self):
110 110 self.size = struct.calcsize(indexformatv0)
111 111
112 112 def parseindex(self, data, inline):
113 113 s = self.size
114 114 index = []
115 115 nodemap = {nullid: nullrev}
116 116 n = off = 0
117 117 l = len(data)
118 118 while off + s <= l:
119 119 cur = data[off:off + s]
120 120 off += s
121 121 e = _unpack(indexformatv0, cur)
122 122 # transform to revlogv1 format
123 123 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
124 124 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
125 125 index.append(e2)
126 126 nodemap[e[6]] = n
127 127 n += 1
128 128
129 129 # add the magic null revision at -1
130 130 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
131 131
132 132 return index, nodemap, None
133 133
134 134 def packentry(self, entry, node, version, rev):
135 135 if gettype(entry[0]):
136 136 raise RevlogError(_("index entry flags need RevlogNG"))
137 137 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
138 138 node(entry[5]), node(entry[6]), entry[7])
139 139 return _pack(indexformatv0, *e2)
140 140
141 141 # index ng:
142 142 # 6 bytes: offset
143 143 # 2 bytes: flags
144 144 # 4 bytes: compressed length
145 145 # 4 bytes: uncompressed length
146 146 # 4 bytes: base rev
147 147 # 4 bytes: link rev
148 148 # 4 bytes: parent 1 rev
149 149 # 4 bytes: parent 2 rev
150 150 # 32 bytes: nodeid
151 151 indexformatng = ">Qiiiiii20s12x"
152 152 versionformat = ">I"
153 153
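# Sketch (not in the original source): a RevlogNG index entry is a fixed
# 64-byte record, e.g.
#
#   import struct
#   struct.calcsize(">Qiiiiii20s12x")   # -> 64 bytes per index entry
#
# The 6-byte offset and 2-byte flags share the leading 64-bit field (see
# offset_type/getoffset/gettype above), and the first entry's offset field
# doubles as the version header (see revlogio.packentry below).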
154 154 # corresponds to uncompressed length of indexformatng (2 gigs, 4-byte
155 155 # signed integer)
156 156 _maxentrysize = 0x7fffffff
157 157
158 158 class revlogio(object):
159 159 def __init__(self):
160 160 self.size = struct.calcsize(indexformatng)
161 161
162 162 def parseindex(self, data, inline):
163 163 # call the C implementation to parse the index data
164 164 index, cache = parsers.parse_index2(data, inline)
165 165 return index, getattr(index, 'nodemap', None), cache
166 166
167 167 def packentry(self, entry, node, version, rev):
168 168 p = _pack(indexformatng, *entry)
169 169 if rev == 0:
170 170 p = _pack(versionformat, version) + p[4:]
171 171 return p
172 172
173 173 class revlog(object):
174 174 """
175 175 the underlying revision storage object
176 176
177 177 A revlog consists of two parts, an index and the revision data.
178 178
179 179 The index is a file with a fixed record size containing
180 180 information on each revision, including its nodeid (hash), the
181 181 nodeids of its parents, the position and offset of its data within
182 182 the data file, and the revision it's based on. Finally, each entry
183 183 contains a linkrev entry that can serve as a pointer to external
184 184 data.
185 185
186 186 The revision data itself is a linear collection of data chunks.
187 187 Each chunk represents a revision and is usually represented as a
188 188 delta against the previous chunk. To bound lookup time, runs of
189 189 deltas are limited to about 2 times the length of the original
190 190 version data. This makes retrieval of a version proportional to
191 191 its size, or O(1) relative to the number of revisions.
192 192
193 193 Both pieces of the revlog are written to in an append-only
194 194 fashion, which means we never need to rewrite a file to insert or
195 195 remove data, and can use some simple techniques to avoid the need
196 196 for locking while reading.
197 197 """
198 198 def __init__(self, opener, indexfile):
199 199 """
200 200 create a revlog object
201 201
202 202 opener is a function that abstracts the file opening operation
203 203 and can be used to implement COW semantics or the like.
204 204 """
205 205 self.indexfile = indexfile
206 206 self.datafile = indexfile[:-2] + ".d"
207 207 self.opener = opener
208 208 self._cache = None
209 209 self._basecache = None
210 210 self._chunkcache = (0, '')
211 211 self._chunkcachesize = 65536
212 212 self._maxchainlen = None
213 self._aggressivemergedeltas = False
213 214 self.index = []
214 215 self._pcache = {}
215 216 self._nodecache = {nullid: nullrev}
216 217 self._nodepos = None
217 218
218 219 v = REVLOG_DEFAULT_VERSION
219 220 opts = getattr(opener, 'options', None)
220 221 if opts is not None:
221 222 if 'revlogv1' in opts:
222 223 if 'generaldelta' in opts:
223 224 v |= REVLOGGENERALDELTA
224 225 else:
225 226 v = 0
226 227 if 'chunkcachesize' in opts:
227 228 self._chunkcachesize = opts['chunkcachesize']
228 229 if 'maxchainlen' in opts:
229 230 self._maxchainlen = opts['maxchainlen']
231 if 'aggressivemergedeltas' in opts:
232 self._aggressivemergedeltas = opts['aggressivemergedeltas']
230 233
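# Note (added commentary, wiring assumed): these opener options are normally
# populated by localrepo from the user configuration, so with this change a
# setting along the lines of
#
#   [format]
#   aggressivemergedeltas = True
#
# would be expected to flow into opts['aggressivemergedeltas'] and enable the
# new behaviour; the exact config section/name is not visible in this hunk.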
231 234 if self._chunkcachesize <= 0:
232 235 raise RevlogError(_('revlog chunk cache size %r is not greater '
233 236 'than 0') % self._chunkcachesize)
234 237 elif self._chunkcachesize & (self._chunkcachesize - 1):
235 238 raise RevlogError(_('revlog chunk cache size %r is not a power '
236 239 'of 2') % self._chunkcachesize)
237 240
238 241 i = ''
239 242 self._initempty = True
240 243 try:
241 244 f = self.opener(self.indexfile)
242 245 i = f.read()
243 246 f.close()
244 247 if len(i) > 0:
245 248 v = struct.unpack(versionformat, i[:4])[0]
246 249 self._initempty = False
247 250 except IOError as inst:
248 251 if inst.errno != errno.ENOENT:
249 252 raise
250 253
251 254 self.version = v
252 255 self._inline = v & REVLOGNGINLINEDATA
253 256 self._generaldelta = v & REVLOGGENERALDELTA
254 257 flags = v & ~0xFFFF
255 258 fmt = v & 0xFFFF
256 259 if fmt == REVLOGV0 and flags:
257 260 raise RevlogError(_("index %s unknown flags %#04x for format v0")
258 261 % (self.indexfile, flags >> 16))
259 262 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
260 263 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
261 264 % (self.indexfile, flags >> 16))
262 265 elif fmt > REVLOGNG:
263 266 raise RevlogError(_("index %s unknown format %d")
264 267 % (self.indexfile, fmt))
265 268
266 269 self._io = revlogio()
267 270 if self.version == REVLOGV0:
268 271 self._io = revlogoldio()
269 272 try:
270 273 d = self._io.parseindex(i, self._inline)
271 274 except (ValueError, IndexError):
272 275 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
273 276 self.index, nodemap, self._chunkcache = d
274 277 if nodemap is not None:
275 278 self.nodemap = self._nodecache = nodemap
276 279 if not self._chunkcache:
277 280 self._chunkclear()
278 281 # revnum -> (chain-length, sum-delta-length)
279 282 self._chaininfocache = {}
280 283
281 284 def tip(self):
282 285 return self.node(len(self.index) - 2)
283 286 def __contains__(self, rev):
284 287 return 0 <= rev < len(self)
285 288 def __len__(self):
286 289 return len(self.index) - 1
287 290 def __iter__(self):
288 291 return iter(xrange(len(self)))
289 292 def revs(self, start=0, stop=None):
290 293 """iterate over all rev in this revlog (from start to stop)"""
291 294 step = 1
292 295 if stop is not None:
293 296 if start > stop:
294 297 step = -1
295 298 stop += step
296 299 else:
297 300 stop = len(self)
298 301 return xrange(start, stop, step)
299 302
300 303 @util.propertycache
301 304 def nodemap(self):
302 305 self.rev(self.node(0))
303 306 return self._nodecache
304 307
305 308 def hasnode(self, node):
306 309 try:
307 310 self.rev(node)
308 311 return True
309 312 except KeyError:
310 313 return False
311 314
312 315 def clearcaches(self):
313 316 try:
314 317 self._nodecache.clearcaches()
315 318 except AttributeError:
316 319 self._nodecache = {nullid: nullrev}
317 320 self._nodepos = None
318 321
319 322 def rev(self, node):
320 323 try:
321 324 return self._nodecache[node]
322 325 except TypeError:
323 326 raise
324 327 except RevlogError:
325 328 # parsers.c radix tree lookup failed
326 329 raise LookupError(node, self.indexfile, _('no node'))
327 330 except KeyError:
328 331 # pure python cache lookup failed
329 332 n = self._nodecache
330 333 i = self.index
331 334 p = self._nodepos
332 335 if p is None:
333 336 p = len(i) - 2
334 337 for r in xrange(p, -1, -1):
335 338 v = i[r][7]
336 339 n[v] = r
337 340 if v == node:
338 341 self._nodepos = r - 1
339 342 return r
340 343 raise LookupError(node, self.indexfile, _('no node'))
341 344
342 345 def node(self, rev):
343 346 return self.index[rev][7]
344 347 def linkrev(self, rev):
345 348 return self.index[rev][4]
346 349 def parents(self, node):
347 350 i = self.index
348 351 d = i[self.rev(node)]
349 352 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
350 353 def parentrevs(self, rev):
351 354 return self.index[rev][5:7]
352 355 def start(self, rev):
353 356 return int(self.index[rev][0] >> 16)
354 357 def end(self, rev):
355 358 return self.start(rev) + self.length(rev)
356 359 def length(self, rev):
357 360 return self.index[rev][1]
358 361 def chainbase(self, rev):
359 362 index = self.index
360 363 base = index[rev][3]
361 364 while base != rev:
362 365 rev = base
363 366 base = index[rev][3]
364 367 return base
365 368 def chainlen(self, rev):
366 369 return self._chaininfo(rev)[0]
367 370
368 371 def _chaininfo(self, rev):
369 372 chaininfocache = self._chaininfocache
370 373 if rev in chaininfocache:
371 374 return chaininfocache[rev]
372 375 index = self.index
373 376 generaldelta = self._generaldelta
374 377 iterrev = rev
375 378 e = index[iterrev]
376 379 clen = 0
377 380 compresseddeltalen = 0
378 381 while iterrev != e[3]:
379 382 clen += 1
380 383 compresseddeltalen += e[1]
381 384 if generaldelta:
382 385 iterrev = e[3]
383 386 else:
384 387 iterrev -= 1
385 388 if iterrev in chaininfocache:
386 389 t = chaininfocache[iterrev]
387 390 clen += t[0]
388 391 compresseddeltalen += t[1]
389 392 break
390 393 e = index[iterrev]
391 394 else:
392 395 # Add text length of base since decompressing that also takes
393 396 # work. For cache hits the length is already included.
394 397 compresseddeltalen += e[1]
395 398 r = (clen, compresseddeltalen)
396 399 chaininfocache[rev] = r
397 400 return r
398 401
399 402 def flags(self, rev):
400 403 return self.index[rev][0] & 0xFFFF
401 404 def rawsize(self, rev):
402 405 """return the length of the uncompressed text for a given revision"""
403 406 l = self.index[rev][2]
404 407 if l >= 0:
405 408 return l
406 409
407 410 t = self.revision(self.node(rev))
408 411 return len(t)
409 412 size = rawsize
410 413
411 414 def ancestors(self, revs, stoprev=0, inclusive=False):
412 415 """Generate the ancestors of 'revs' in reverse topological order.
413 416 Does not generate revs lower than stoprev.
414 417
415 418 See the documentation for ancestor.lazyancestors for more details."""
416 419
417 420 return ancestor.lazyancestors(self.parentrevs, revs, stoprev=stoprev,
418 421 inclusive=inclusive)
419 422
420 423 def descendants(self, revs):
421 424 """Generate the descendants of 'revs' in revision order.
422 425
423 426 Yield a sequence of revision numbers starting with a child of
424 427 some rev in revs, i.e., each revision is *not* considered a
425 428 descendant of itself. Results are ordered by revision number (a
426 429 topological sort)."""
427 430 first = min(revs)
428 431 if first == nullrev:
429 432 for i in self:
430 433 yield i
431 434 return
432 435
433 436 seen = set(revs)
434 437 for i in self.revs(start=first + 1):
435 438 for x in self.parentrevs(i):
436 439 if x != nullrev and x in seen:
437 440 seen.add(i)
438 441 yield i
439 442 break
440 443
441 444 def findcommonmissing(self, common=None, heads=None):
442 445 """Return a tuple of the ancestors of common and the ancestors of heads
443 446 that are not ancestors of common. In revset terminology, we return the
444 447 tuple:
445 448
446 449 ::common, (::heads) - (::common)
447 450
448 451 The list is sorted by revision number, meaning it is
449 452 topologically sorted.
450 453
451 454 'heads' and 'common' are both lists of node IDs. If heads is
452 455 not supplied, uses all of the revlog's heads. If common is not
453 456 supplied, uses nullid."""
454 457 if common is None:
455 458 common = [nullid]
456 459 if heads is None:
457 460 heads = self.heads()
458 461
459 462 common = [self.rev(n) for n in common]
460 463 heads = [self.rev(n) for n in heads]
461 464
462 465 # we want the ancestors, but inclusive
463 466 class lazyset(object):
464 467 def __init__(self, lazyvalues):
465 468 self.addedvalues = set()
466 469 self.lazyvalues = lazyvalues
467 470
468 471 def __contains__(self, value):
469 472 return value in self.addedvalues or value in self.lazyvalues
470 473
471 474 def __iter__(self):
472 475 added = self.addedvalues
473 476 for r in added:
474 477 yield r
475 478 for r in self.lazyvalues:
476 479 if r not in added:
477 480 yield r
478 481
479 482 def add(self, value):
480 483 self.addedvalues.add(value)
481 484
482 485 def update(self, values):
483 486 self.addedvalues.update(values)
484 487
485 488 has = lazyset(self.ancestors(common))
486 489 has.add(nullrev)
487 490 has.update(common)
488 491
489 492 # take all ancestors from heads that aren't in has
490 493 missing = set()
491 494 visit = collections.deque(r for r in heads if r not in has)
492 495 while visit:
493 496 r = visit.popleft()
494 497 if r in missing:
495 498 continue
496 499 else:
497 500 missing.add(r)
498 501 for p in self.parentrevs(r):
499 502 if p not in has:
500 503 visit.append(p)
501 504 missing = list(missing)
502 505 missing.sort()
503 506 return has, [self.node(r) for r in missing]
504 507
505 508 def incrementalmissingrevs(self, common=None):
506 509 """Return an object that can be used to incrementally compute the
507 510 revision numbers of the ancestors of arbitrary sets that are not
508 511 ancestors of common. This is an ancestor.incrementalmissingancestors
509 512 object.
510 513
511 514 'common' is a list of revision numbers. If common is not supplied, uses
512 515 nullrev.
513 516 """
514 517 if common is None:
515 518 common = [nullrev]
516 519
517 520 return ancestor.incrementalmissingancestors(self.parentrevs, common)
518 521
519 522 def findmissingrevs(self, common=None, heads=None):
520 523 """Return the revision numbers of the ancestors of heads that
521 524 are not ancestors of common.
522 525
523 526 More specifically, return a list of revision numbers corresponding to
524 527 nodes N such that every N satisfies the following constraints:
525 528
526 529 1. N is an ancestor of some node in 'heads'
527 530 2. N is not an ancestor of any node in 'common'
528 531
529 532 The list is sorted by revision number, meaning it is
530 533 topologically sorted.
531 534
532 535 'heads' and 'common' are both lists of revision numbers. If heads is
533 536 not supplied, uses all of the revlog's heads. If common is not
534 537 supplied, uses nullid."""
535 538 if common is None:
536 539 common = [nullrev]
537 540 if heads is None:
538 541 heads = self.headrevs()
539 542
540 543 inc = self.incrementalmissingrevs(common=common)
541 544 return inc.missingancestors(heads)
542 545
543 546 def findmissing(self, common=None, heads=None):
544 547 """Return the ancestors of heads that are not ancestors of common.
545 548
546 549 More specifically, return a list of nodes N such that every N
547 550 satisfies the following constraints:
548 551
549 552 1. N is an ancestor of some node in 'heads'
550 553 2. N is not an ancestor of any node in 'common'
551 554
552 555 The list is sorted by revision number, meaning it is
553 556 topologically sorted.
554 557
555 558 'heads' and 'common' are both lists of node IDs. If heads is
556 559 not supplied, uses all of the revlog's heads. If common is not
557 560 supplied, uses nullid."""
558 561 if common is None:
559 562 common = [nullid]
560 563 if heads is None:
561 564 heads = self.heads()
562 565
563 566 common = [self.rev(n) for n in common]
564 567 heads = [self.rev(n) for n in heads]
565 568
566 569 inc = self.incrementalmissingrevs(common=common)
567 570 return [self.node(r) for r in inc.missingancestors(heads)]
568 571
569 572 def nodesbetween(self, roots=None, heads=None):
570 573 """Return a topological path from 'roots' to 'heads'.
571 574
572 575 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
573 576 topologically sorted list of all nodes N that satisfy both of
574 577 these constraints:
575 578
576 579 1. N is a descendant of some node in 'roots'
577 580 2. N is an ancestor of some node in 'heads'
578 581
579 582 Every node is considered to be both a descendant and an ancestor
580 583 of itself, so every reachable node in 'roots' and 'heads' will be
581 584 included in 'nodes'.
582 585
583 586 'outroots' is the list of reachable nodes in 'roots', i.e., the
584 587 subset of 'roots' that is returned in 'nodes'. Likewise,
585 588 'outheads' is the subset of 'heads' that is also in 'nodes'.
586 589
587 590 'roots' and 'heads' are both lists of node IDs. If 'roots' is
588 591 unspecified, uses nullid as the only root. If 'heads' is
589 592 unspecified, uses list of all of the revlog's heads."""
590 593 nonodes = ([], [], [])
591 594 if roots is not None:
592 595 roots = list(roots)
593 596 if not roots:
594 597 return nonodes
595 598 lowestrev = min([self.rev(n) for n in roots])
596 599 else:
597 600 roots = [nullid] # Everybody's a descendant of nullid
598 601 lowestrev = nullrev
599 602 if (lowestrev == nullrev) and (heads is None):
600 603 # We want _all_ the nodes!
601 604 return ([self.node(r) for r in self], [nullid], list(self.heads()))
602 605 if heads is None:
603 606 # All nodes are ancestors, so the latest ancestor is the last
604 607 # node.
605 608 highestrev = len(self) - 1
606 609 # Set ancestors to None to signal that every node is an ancestor.
607 610 ancestors = None
608 611 # Set heads to an empty dictionary for later discovery of heads
609 612 heads = {}
610 613 else:
611 614 heads = list(heads)
612 615 if not heads:
613 616 return nonodes
614 617 ancestors = set()
615 618 # Turn heads into a dictionary so we can remove 'fake' heads.
616 619 # Also, later we will be using it to filter out the heads we can't
617 620 # find from roots.
618 621 heads = dict.fromkeys(heads, False)
619 622 # Start at the top and keep marking parents until we're done.
620 623 nodestotag = set(heads)
621 624 # Remember where the top was so we can use it as a limit later.
622 625 highestrev = max([self.rev(n) for n in nodestotag])
623 626 while nodestotag:
624 627 # grab a node to tag
625 628 n = nodestotag.pop()
626 629 # Never tag nullid
627 630 if n == nullid:
628 631 continue
629 632 # A node's revision number represents its place in a
630 633 # topologically sorted list of nodes.
631 634 r = self.rev(n)
632 635 if r >= lowestrev:
633 636 if n not in ancestors:
634 637 # If we are possibly a descendant of one of the roots
635 638 # and we haven't already been marked as an ancestor
636 639 ancestors.add(n) # Mark as ancestor
637 640 # Add non-nullid parents to list of nodes to tag.
638 641 nodestotag.update([p for p in self.parents(n) if
639 642 p != nullid])
640 643 elif n in heads: # We've seen it before, is it a fake head?
641 644 # So it is, real heads should not be the ancestors of
642 645 # any other heads.
643 646 heads.pop(n)
644 647 if not ancestors:
645 648 return nonodes
646 649 # Now that we have our set of ancestors, we want to remove any
647 650 # roots that are not ancestors.
648 651
649 652 # If one of the roots was nullid, everything is included anyway.
650 653 if lowestrev > nullrev:
651 654 # But, since we weren't, let's recompute the lowest rev to not
652 655 # include roots that aren't ancestors.
653 656
654 657 # Filter out roots that aren't ancestors of heads
655 658 roots = [n for n in roots if n in ancestors]
656 659 # Recompute the lowest revision
657 660 if roots:
658 661 lowestrev = min([self.rev(n) for n in roots])
659 662 else:
660 663 # No more roots? Return empty list
661 664 return nonodes
662 665 else:
663 666 # We are descending from nullid, and don't need to care about
664 667 # any other roots.
665 668 lowestrev = nullrev
666 669 roots = [nullid]
667 670 # Transform our roots list into a set.
668 671 descendants = set(roots)
669 672 # Also, keep the original roots so we can filter out roots that aren't
670 673 # 'real' roots (i.e. are descended from other roots).
671 674 roots = descendants.copy()
672 675 # Our topologically sorted list of output nodes.
673 676 orderedout = []
674 677 # Don't start at nullid since we don't want nullid in our output list,
675 678 # and if nullid shows up in descendants, empty parents will look like
676 679 # they're descendants.
677 680 for r in self.revs(start=max(lowestrev, 0), stop=highestrev + 1):
678 681 n = self.node(r)
679 682 isdescendant = False
680 683 if lowestrev == nullrev: # Everybody is a descendant of nullid
681 684 isdescendant = True
682 685 elif n in descendants:
683 686 # n is already a descendant
684 687 isdescendant = True
685 688 # This check only needs to be done here because all the roots
686 689 # will start being marked as descendants before the loop.
687 690 if n in roots:
688 691 # If n was a root, check if it's a 'real' root.
689 692 p = tuple(self.parents(n))
690 693 # If any of its parents are descendants, it's not a root.
691 694 if (p[0] in descendants) or (p[1] in descendants):
692 695 roots.remove(n)
693 696 else:
694 697 p = tuple(self.parents(n))
695 698 # A node is a descendant if either of its parents are
696 699 # descendants. (We seeded the descendants set with the roots
697 700 # up there, remember?)
698 701 if (p[0] in descendants) or (p[1] in descendants):
699 702 descendants.add(n)
700 703 isdescendant = True
701 704 if isdescendant and ((ancestors is None) or (n in ancestors)):
702 705 # Only include nodes that are both descendants and ancestors.
703 706 orderedout.append(n)
704 707 if (ancestors is not None) and (n in heads):
705 708 # We're trying to figure out which heads are reachable
706 709 # from roots.
707 710 # Mark this head as having been reached
708 711 heads[n] = True
709 712 elif ancestors is None:
710 713 # Otherwise, we're trying to discover the heads.
711 714 # Assume this is a head because if it isn't, the next step
712 715 # will eventually remove it.
713 716 heads[n] = True
714 717 # But, obviously its parents aren't.
715 718 for p in self.parents(n):
716 719 heads.pop(p, None)
717 720 heads = [n for n, flag in heads.iteritems() if flag]
718 721 roots = list(roots)
719 722 assert orderedout
720 723 assert roots
721 724 assert heads
722 725 return (orderedout, roots, heads)
723 726
724 727 def headrevs(self):
725 728 try:
726 729 return self.index.headrevs()
727 730 except AttributeError:
728 731 return self._headrevs()
729 732
730 733 def computephases(self, roots):
731 734 return self.index.computephasesmapsets(roots)
732 735
733 736 def _headrevs(self):
734 737 count = len(self)
735 738 if not count:
736 739 return [nullrev]
737 740 # we won't iterate over filtered revs, so nobody is a head at start
738 741 ishead = [0] * (count + 1)
739 742 index = self.index
740 743 for r in self:
741 744 ishead[r] = 1 # I may be a head
742 745 e = index[r]
743 746 ishead[e[5]] = ishead[e[6]] = 0 # my parents are not
744 747 return [r for r, val in enumerate(ishead) if val]
745 748
746 749 def heads(self, start=None, stop=None):
747 750 """return the list of all nodes that have no children
748 751
749 752 if start is specified, only heads that are descendants of
750 753 start will be returned
751 754 if stop is specified, it will consider all the revs from stop
752 755 as if they had no children
753 756 """
754 757 if start is None and stop is None:
755 758 if not len(self):
756 759 return [nullid]
757 760 return [self.node(r) for r in self.headrevs()]
758 761
759 762 if start is None:
760 763 start = nullid
761 764 if stop is None:
762 765 stop = []
763 766 stoprevs = set([self.rev(n) for n in stop])
764 767 startrev = self.rev(start)
765 768 reachable = set((startrev,))
766 769 heads = set((startrev,))
767 770
768 771 parentrevs = self.parentrevs
769 772 for r in self.revs(start=startrev + 1):
770 773 for p in parentrevs(r):
771 774 if p in reachable:
772 775 if r not in stoprevs:
773 776 reachable.add(r)
774 777 heads.add(r)
775 778 if p in heads and p not in stoprevs:
776 779 heads.remove(p)
777 780
778 781 return [self.node(r) for r in heads]
779 782
780 783 def children(self, node):
781 784 """find the children of a given node"""
782 785 c = []
783 786 p = self.rev(node)
784 787 for r in self.revs(start=p + 1):
785 788 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
786 789 if prevs:
787 790 for pr in prevs:
788 791 if pr == p:
789 792 c.append(self.node(r))
790 793 elif p == nullrev:
791 794 c.append(self.node(r))
792 795 return c
793 796
794 797 def descendant(self, start, end):
795 798 if start == nullrev:
796 799 return True
797 800 for i in self.descendants([start]):
798 801 if i == end:
799 802 return True
800 803 elif i > end:
801 804 break
802 805 return False
803 806
804 807 def commonancestorsheads(self, a, b):
805 808 """calculate all the heads of the common ancestors of nodes a and b"""
806 809 a, b = self.rev(a), self.rev(b)
807 810 try:
808 811 ancs = self.index.commonancestorsheads(a, b)
809 812 except (AttributeError, OverflowError): # C implementation failed
810 813 ancs = ancestor.commonancestorsheads(self.parentrevs, a, b)
811 814 return map(self.node, ancs)
812 815
813 816 def isancestor(self, a, b):
814 817 """return True if node a is an ancestor of node b
815 818
816 819 The implementation of this is trivial but the use of
817 820 commonancestorsheads is not."""
818 821 return a in self.commonancestorsheads(a, b)
819 822
820 823 def ancestor(self, a, b):
821 824 """calculate the "best" common ancestor of nodes a and b"""
822 825
823 826 a, b = self.rev(a), self.rev(b)
824 827 try:
825 828 ancs = self.index.ancestors(a, b)
826 829 except (AttributeError, OverflowError):
827 830 ancs = ancestor.ancestors(self.parentrevs, a, b)
828 831 if ancs:
829 832 # choose a consistent winner when there's a tie
830 833 return min(map(self.node, ancs))
831 834 return nullid
832 835
833 836 def _match(self, id):
834 837 if isinstance(id, int):
835 838 # rev
836 839 return self.node(id)
837 840 if len(id) == 20:
838 841 # possibly a binary node
839 842 # odds of a binary node being all hex in ASCII are 1 in 10**25
840 843 try:
841 844 node = id
842 845 self.rev(node) # quick search the index
843 846 return node
844 847 except LookupError:
845 848 pass # may be partial hex id
846 849 try:
847 850 # str(rev)
848 851 rev = int(id)
849 852 if str(rev) != id:
850 853 raise ValueError
851 854 if rev < 0:
852 855 rev = len(self) + rev
853 856 if rev < 0 or rev >= len(self):
854 857 raise ValueError
855 858 return self.node(rev)
856 859 except (ValueError, OverflowError):
857 860 pass
858 861 if len(id) == 40:
859 862 try:
860 863 # a full hex nodeid?
861 864 node = bin(id)
862 865 self.rev(node)
863 866 return node
864 867 except (TypeError, LookupError):
865 868 pass
866 869
867 870 def _partialmatch(self, id):
868 871 try:
869 872 n = self.index.partialmatch(id)
870 873 if n and self.hasnode(n):
871 874 return n
872 875 return None
873 876 except RevlogError:
874 877 # parsers.c radix tree lookup gave multiple matches
875 878 # fall through to slow path that filters hidden revisions
876 879 pass
877 880 except (AttributeError, ValueError):
878 881 # we are pure python, or key was too short to search radix tree
879 882 pass
880 883
881 884 if id in self._pcache:
882 885 return self._pcache[id]
883 886
884 887 if len(id) < 40:
885 888 try:
886 889 # hex(node)[:...]
887 890 l = len(id) // 2 # grab an even number of digits
888 891 prefix = bin(id[:l * 2])
889 892 nl = [e[7] for e in self.index if e[7].startswith(prefix)]
890 893 nl = [n for n in nl if hex(n).startswith(id) and
891 894 self.hasnode(n)]
892 895 if len(nl) > 0:
893 896 if len(nl) == 1:
894 897 self._pcache[id] = nl[0]
895 898 return nl[0]
896 899 raise LookupError(id, self.indexfile,
897 900 _('ambiguous identifier'))
898 901 return None
899 902 except TypeError:
900 903 pass
901 904
902 905 def lookup(self, id):
903 906 """locate a node based on:
904 907 - revision number or str(revision number)
905 908 - nodeid or subset of hex nodeid
906 909 """
907 910 n = self._match(id)
908 911 if n is not None:
909 912 return n
910 913 n = self._partialmatch(id)
911 914 if n:
912 915 return n
913 916
914 917 raise LookupError(id, self.indexfile, _('no match found'))
915 918
916 919 def cmp(self, node, text):
917 920 """compare text with a given file revision
918 921
919 922 returns True if text is different than what is stored.
920 923 """
921 924 p1, p2 = self.parents(node)
922 925 return hash(text, p1, p2) != node
923 926
924 927 def _addchunk(self, offset, data):
925 928 o, d = self._chunkcache
926 929 # try to add to existing cache
927 930 if o + len(d) == offset and len(d) + len(data) < _chunksize:
928 931 self._chunkcache = o, d + data
929 932 else:
930 933 self._chunkcache = offset, data
931 934
932 935 def _loadchunk(self, offset, length):
933 936 if self._inline:
934 937 df = self.opener(self.indexfile)
935 938 else:
936 939 df = self.opener(self.datafile)
937 940
938 941 # Cache data both forward and backward around the requested
939 942 # data, in a fixed size window. This helps speed up operations
940 943 # involving reading the revlog backwards.
941 944 cachesize = self._chunkcachesize
942 945 realoffset = offset & ~(cachesize - 1)
943 946 reallength = (((offset + length + cachesize) & ~(cachesize - 1))
944 947 - realoffset)
945 948 df.seek(realoffset)
946 949 d = df.read(reallength)
947 950 df.close()
948 951 self._addchunk(realoffset, d)
949 952 if offset != realoffset or reallength != length:
950 953 return util.buffer(d, offset - realoffset, length)
951 954 return d
952 955
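# Worked example (comment only): with the default _chunkcachesize of 65536,
# a request for offset=70000, length=100 is rounded to
# realoffset = 70000 & ~65535 = 65536 and
# reallength = ((70000 + 100 + 65536) & ~65535) - 65536 = 65536,
# so the whole aligned 64k window around the requested span is read and
# cached, and a buffer view into it is returned.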
953 956 def _getchunk(self, offset, length):
954 957 o, d = self._chunkcache
955 958 l = len(d)
956 959
957 960 # is it in the cache?
958 961 cachestart = offset - o
959 962 cacheend = cachestart + length
960 963 if cachestart >= 0 and cacheend <= l:
961 964 if cachestart == 0 and cacheend == l:
962 965 return d # avoid a copy
963 966 return util.buffer(d, cachestart, cacheend - cachestart)
964 967
965 968 return self._loadchunk(offset, length)
966 969
967 970 def _chunkraw(self, startrev, endrev):
968 971 start = self.start(startrev)
969 972 end = self.end(endrev)
970 973 if self._inline:
971 974 start += (startrev + 1) * self._io.size
972 975 end += (endrev + 1) * self._io.size
973 976 length = end - start
974 977 return self._getchunk(start, length)
975 978
976 979 def _chunk(self, rev):
977 980 return decompress(self._chunkraw(rev, rev))
978 981
979 982 def _chunks(self, revs):
980 983 '''faster version of [self._chunk(rev) for rev in revs]
981 984
982 985 Assumes that revs is in ascending order.'''
983 986 if not revs:
984 987 return []
985 988 start = self.start
986 989 length = self.length
987 990 inline = self._inline
988 991 iosize = self._io.size
989 992 buffer = util.buffer
990 993
991 994 l = []
992 995 ladd = l.append
993 996
994 997 # preload the cache
995 998 try:
996 999 while True:
997 1000 # ensure that the cache doesn't change out from under us
998 1001 _cache = self._chunkcache
999 1002 self._chunkraw(revs[0], revs[-1])
1000 1003 if _cache == self._chunkcache:
1001 1004 break
1002 1005 offset, data = _cache
1003 1006 except OverflowError:
1004 1007 # issue4215 - we can't cache a run of chunks greater than
1005 1008 # 2G on Windows
1006 1009 return [self._chunk(rev) for rev in revs]
1007 1010
1008 1011 for rev in revs:
1009 1012 chunkstart = start(rev)
1010 1013 if inline:
1011 1014 chunkstart += (rev + 1) * iosize
1012 1015 chunklength = length(rev)
1013 1016 ladd(decompress(buffer(data, chunkstart - offset, chunklength)))
1014 1017
1015 1018 return l
1016 1019
1017 1020 def _chunkclear(self):
1018 1021 self._chunkcache = (0, '')
1019 1022
1020 1023 def deltaparent(self, rev):
1021 1024 """return deltaparent of the given revision"""
1022 1025 base = self.index[rev][3]
1023 1026 if base == rev:
1024 1027 return nullrev
1025 1028 elif self._generaldelta:
1026 1029 return base
1027 1030 else:
1028 1031 return rev - 1
1029 1032
1030 1033 def revdiff(self, rev1, rev2):
1031 1034 """return or calculate a delta between two revisions"""
1032 1035 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
1033 1036 return str(self._chunk(rev2))
1034 1037
1035 1038 return mdiff.textdiff(self.revision(rev1),
1036 1039 self.revision(rev2))
1037 1040
1038 1041 def revision(self, nodeorrev):
1039 1042 """return an uncompressed revision of a given node or revision
1040 1043 number.
1041 1044 """
1042 1045 if isinstance(nodeorrev, int):
1043 1046 rev = nodeorrev
1044 1047 node = self.node(rev)
1045 1048 else:
1046 1049 node = nodeorrev
1047 1050 rev = None
1048 1051
1049 1052 _cache = self._cache # grab local copy of cache to avoid thread race
1050 1053 cachedrev = None
1051 1054 if node == nullid:
1052 1055 return ""
1053 1056 if _cache:
1054 1057 if _cache[0] == node:
1055 1058 return _cache[2]
1056 1059 cachedrev = _cache[1]
1057 1060
1058 1061 # look up what we need to read
1059 1062 text = None
1060 1063 if rev is None:
1061 1064 rev = self.rev(node)
1062 1065
1063 1066 # check rev flags
1064 1067 if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
1065 1068 raise RevlogError(_('incompatible revision flag %x') %
1066 1069 (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))
1067 1070
1068 1071 # build delta chain
1069 1072 chain = []
1070 1073 index = self.index # for performance
1071 1074 generaldelta = self._generaldelta
1072 1075 iterrev = rev
1073 1076 e = index[iterrev]
1074 1077 while iterrev != e[3] and iterrev != cachedrev:
1075 1078 chain.append(iterrev)
1076 1079 if generaldelta:
1077 1080 iterrev = e[3]
1078 1081 else:
1079 1082 iterrev -= 1
1080 1083 e = index[iterrev]
1081 1084
1082 1085 if iterrev == cachedrev:
1083 1086 # cache hit
1084 1087 text = _cache[2]
1085 1088 else:
1086 1089 chain.append(iterrev)
1087 1090 chain.reverse()
1088 1091
1089 1092 # drop cache to save memory
1090 1093 self._cache = None
1091 1094
1092 1095 bins = self._chunks(chain)
1093 1096 if text is None:
1094 1097 text = str(bins[0])
1095 1098 bins = bins[1:]
1096 1099
1097 1100 text = mdiff.patches(text, bins)
1098 1101
1099 1102 text = self._checkhash(text, node, rev)
1100 1103
1101 1104 self._cache = (node, rev, text)
1102 1105 return text
1103 1106
1104 1107 def hash(self, text, p1, p2):
1105 1108 """Compute a node hash.
1106 1109
1107 1110 Available as a function so that subclasses can replace the hash
1108 1111 as needed.
1109 1112 """
1110 1113 return hash(text, p1, p2)
1111 1114
1112 1115 def _checkhash(self, text, node, rev):
1113 1116 p1, p2 = self.parents(node)
1114 1117 self.checkhash(text, p1, p2, node, rev)
1115 1118 return text
1116 1119
1117 1120 def checkhash(self, text, p1, p2, node, rev=None):
1118 1121 if node != self.hash(text, p1, p2):
1119 1122 revornode = rev
1120 1123 if revornode is None:
1121 1124 revornode = templatefilters.short(hex(node))
1122 1125 raise RevlogError(_("integrity check failed on %s:%s")
1123 1126 % (self.indexfile, revornode))
1124 1127
1125 1128 def checkinlinesize(self, tr, fp=None):
1126 1129 if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
1127 1130 return
1128 1131
1129 1132 trinfo = tr.find(self.indexfile)
1130 1133 if trinfo is None:
1131 1134 raise RevlogError(_("%s not found in the transaction")
1132 1135 % self.indexfile)
1133 1136
1134 1137 trindex = trinfo[2]
1135 1138 if trindex is not None:
1136 1139 dataoff = self.start(trindex)
1137 1140 else:
1138 1141 # revlog was stripped at start of transaction, use all leftover data
1139 1142 trindex = len(self) - 1
1140 1143 dataoff = self.end(-2)
1141 1144
1142 1145 tr.add(self.datafile, dataoff)
1143 1146
1144 1147 if fp:
1145 1148 fp.flush()
1146 1149 fp.close()
1147 1150
1148 1151 df = self.opener(self.datafile, 'w')
1149 1152 try:
1150 1153 for r in self:
1151 1154 df.write(self._chunkraw(r, r))
1152 1155 finally:
1153 1156 df.close()
1154 1157
1155 1158 fp = self.opener(self.indexfile, 'w', atomictemp=True)
1156 1159 self.version &= ~(REVLOGNGINLINEDATA)
1157 1160 self._inline = False
1158 1161 for i in self:
1159 1162 e = self._io.packentry(self.index[i], self.node, self.version, i)
1160 1163 fp.write(e)
1161 1164
1162 1165 # if we don't call close, the temp file will never replace the
1163 1166 # real index
1164 1167 fp.close()
1165 1168
1166 1169 tr.replace(self.indexfile, trindex * self._io.size)
1167 1170 self._chunkclear()
1168 1171
1169 1172 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None,
1170 1173 node=None):
1171 1174 """add a revision to the log
1172 1175
1173 1176 text - the revision data to add
1174 1177 transaction - the transaction object used for rollback
1175 1178 link - the linkrev data to add
1176 1179 p1, p2 - the parent nodeids of the revision
1177 1180 cachedelta - an optional precomputed delta
1178 1181 node - nodeid of revision; typically node is not specified, and it is
1179 1182 computed by default as hash(text, p1, p2), however subclasses might
1180 1183 use different hashing method (and override checkhash() in such case)
1181 1184 """
1182 1185 if link == nullrev:
1183 1186 raise RevlogError(_("attempted to add linkrev -1 to %s")
1184 1187 % self.indexfile)
1185 1188
1186 1189 if len(text) > _maxentrysize:
1187 1190 raise RevlogError(
1188 1191 _("%s: size of %d bytes exceeds maximum revlog storage of 2GiB")
1189 1192 % (self.indexfile, len(text)))
1190 1193
1191 1194 node = node or self.hash(text, p1, p2)
1192 1195 if node in self.nodemap:
1193 1196 return node
1194 1197
1195 1198 dfh = None
1196 1199 if not self._inline:
1197 1200 dfh = self.opener(self.datafile, "a")
1198 1201 ifh = self.opener(self.indexfile, "a+")
1199 1202 try:
1200 1203 return self._addrevision(node, text, transaction, link, p1, p2,
1201 1204 REVIDX_DEFAULT_FLAGS, cachedelta, ifh, dfh)
1202 1205 finally:
1203 1206 if dfh:
1204 1207 dfh.close()
1205 1208 ifh.close()
1206 1209
1207 1210 def compress(self, text):
1208 1211 """ generate a possibly-compressed representation of text """
1209 1212 if not text:
1210 1213 return ("", text)
1211 1214 l = len(text)
1212 1215 bin = None
1213 1216 if l < 44:
1214 1217 pass
1215 1218 elif l > 1000000:
1216 1219 # zlib makes an internal copy, thus doubling memory usage for
1217 1220             # large files, so let's do this in pieces
1218 1221 z = zlib.compressobj()
1219 1222 p = []
1220 1223 pos = 0
1221 1224 while pos < l:
1222 1225 pos2 = pos + 2**20
1223 1226 p.append(z.compress(text[pos:pos2]))
1224 1227 pos = pos2
1225 1228 p.append(z.flush())
1226 1229 if sum(map(len, p)) < l:
1227 1230 bin = "".join(p)
1228 1231 else:
1229 1232 bin = _compress(text)
1230 1233 if bin is None or len(bin) > l:
1231 1234 if text[0] == '\0':
1232 1235 return ("", text)
1233 1236 return ('u', text)
1234 1237 return ("", bin)
1235 1238
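compress() returns a (prefix, payload) pair: an empty prefix with the literal text when the text is tiny or already starts with a NUL byte, an empty prefix with a zlib stream when compression pays off, and a 'u' prefix with the literal text when it does not. A read-side sketch inferred from those markers (an illustration only, not the revlog's actual decompression helper):

    import zlib

    def decompress_sketch(chunk):
        # 'chunk' is prefix + payload as stored on disk by the code above.
        if not chunk:
            return chunk
        t = chunk[0]
        if t == '\0':          # stored verbatim: binary text starting with NUL
            return chunk
        if t == 'x':           # zlib stream ('x' is the zlib header byte)
            return zlib.decompress(chunk)
        if t == 'u':           # explicitly marked as uncompressed
            return chunk[1:]
        raise ValueError('unknown compression marker %r' % t)
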
1236 1239 def _isgooddelta(self, d, textlen):
1237 1240 """Returns True if the given delta is good. Good means that it is within
1238 1241 the disk span, disk size, and chain length bounds that we know to be
1239 1242 performant."""
1240 1243 if d is None:
1241 1244 return False
1242 1245
1243 1246 # - 'dist' is the distance from the base revision -- bounding it limits
1244 1247 # the amount of I/O we need to do.
1245 1248 # - 'compresseddeltalen' is the sum of the total size of deltas we need
1246 1249 # to apply -- bounding it limits the amount of CPU we consume.
1247 1250 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1248 1251 if (dist > textlen * 4 or l > textlen or
1249 1252 compresseddeltalen > textlen * 2 or
1250 1253 (self._maxchainlen and chainlen > self._maxchainlen)):
1251 1254 return False
1252 1255
1253 1256 return True
1254 1257
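A worked example of those bounds with made-up numbers: for a 1000-byte revision, a candidate is rejected if it spans more than 4000 bytes of the data file, is itself larger than 1000 bytes, sits on a chain whose compressed deltas total more than 2000 bytes, or exceeds the configured maximum chain length.

    # Toy numbers only, mirroring the checks in _isgooddelta() above.
    textlen = 1000
    dist, l, compresseddeltalen, chainlen, maxchainlen = 4500, 120, 900, 10, None
    good = not (dist > textlen * 4 or l > textlen or
                compresseddeltalen > textlen * 2 or
                (maxchainlen and chainlen > maxchainlen))
    print good    # False: this delta spans too much of the data file
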
1255 1258 def _addrevision(self, node, text, transaction, link, p1, p2, flags,
1256 1259 cachedelta, ifh, dfh):
1257 1260 """internal function to add revisions to the log
1258 1261
1259 1262 see addrevision for argument descriptions.
1260 1263 invariants:
1261 1264 - text is optional (can be None); if not set, cachedelta must be set.
1262 1265 if both are set, they must correspond to each other.
1263 1266 """
1264 1267 btext = [text]
1265 1268 def buildtext():
1266 1269 if btext[0] is not None:
1267 1270 return btext[0]
1268 1271 # flush any pending writes here so we can read it in revision
1269 1272 if dfh:
1270 1273 dfh.flush()
1271 1274 ifh.flush()
1272 1275 baserev = cachedelta[0]
1273 1276 delta = cachedelta[1]
1274 1277 # special case deltas which replace entire base; no need to decode
1275 1278 # base revision. this neatly avoids censored bases, which throw when
1276 1279 # they're decoded.
1277 1280 hlen = struct.calcsize(">lll")
1278 1281 if delta[:hlen] == mdiff.replacediffheader(self.rawsize(baserev),
1279 1282 len(delta) - hlen):
1280 1283 btext[0] = delta[hlen:]
1281 1284 else:
1282 1285 basetext = self.revision(self.node(baserev))
1283 1286 btext[0] = mdiff.patch(basetext, delta)
1284 1287 try:
1285 1288 self.checkhash(btext[0], p1, p2, node)
1286 1289 if flags & REVIDX_ISCENSORED:
1287 1290 raise RevlogError(_('node %s is not censored') % node)
1288 1291 except CensoredNodeError:
1289 1292 # must pass the censored index flag to add censored revisions
1290 1293 if not flags & REVIDX_ISCENSORED:
1291 1294 raise
1292 1295 return btext[0]
1293 1296
1294 1297 def builddelta(rev):
1295 1298 # can we use the cached delta?
1296 1299 if cachedelta and cachedelta[0] == rev:
1297 1300 delta = cachedelta[1]
1298 1301 else:
1299 1302 t = buildtext()
1300 1303 if self.iscensored(rev):
1301 1304 # deltas based on a censored revision must replace the
1302 1305 # full content in one patch, so delta works everywhere
1303 1306 header = mdiff.replacediffheader(self.rawsize(rev), len(t))
1304 1307 delta = header + t
1305 1308 else:
1306 1309 ptext = self.revision(self.node(rev))
1307 1310 delta = mdiff.textdiff(ptext, t)
1308 1311 data = self.compress(delta)
1309 1312 l = len(data[1]) + len(data[0])
1310 1313 if basecache[0] == rev:
1311 1314 chainbase = basecache[1]
1312 1315 else:
1313 1316 chainbase = self.chainbase(rev)
1314 1317 dist = l + offset - self.start(chainbase)
1315 1318 if self._generaldelta:
1316 1319 base = rev
1317 1320 else:
1318 1321 base = chainbase
1319 1322 chainlen, compresseddeltalen = self._chaininfo(rev)
1320 1323 chainlen += 1
1321 1324 compresseddeltalen += l
1322 1325 return dist, l, data, base, chainbase, chainlen, compresseddeltalen
1323 1326
1324 1327 curr = len(self)
1325 1328 prev = curr - 1
1326 1329 base = chainbase = curr
1327 1330 chainlen = None
1328 1331 offset = self.end(prev)
1329 1332 d = None
1330 1333 if self._basecache is None:
1331 1334 self._basecache = (prev, self.chainbase(prev))
1332 1335 basecache = self._basecache
1333 1336 p1r, p2r = self.rev(p1), self.rev(p2)
1334 1337
1335 1338 # full versions are inserted when the needed deltas
1336 1339 # become comparable to the uncompressed text
1337 1340 if text is None:
1338 1341 textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
1339 1342 cachedelta[1])
1340 1343 else:
1341 1344 textlen = len(text)
1342 1345
1343 1346 # should we try to build a delta?
1344 1347 if prev != nullrev:
1345 1348 if self._generaldelta:
1346 # Pick whichever parent is closer to us (to minimize the
1347 # chance of having to build a fulltext). Since
1348 # nullrev == -1, any non-merge commit will always pick p1r.
1349 drev = p2r if p2r > p1r else p1r
1350 d = builddelta(drev)
1351 # If the chosen delta will result in us making a full text,
1352 # give it one last try against prev.
1353 if drev != prev and not self._isgooddelta(d, textlen):
1354 d = builddelta(prev)
1349 if p2r != nullrev and self._aggressivemergedeltas:
1350 d = builddelta(p1r)
1351 d2 = builddelta(p2r)
1352 p1good = self._isgooddelta(d, textlen)
1353 p2good = self._isgooddelta(d2, textlen)
1354 if p1good and p2good:
1355 # If both are good deltas, choose the smallest
1356 if d2[1] < d[1]:
1357 d = d2
1358 elif p2good:
1359 # If only p2 is good, use it
1360 d = d2
1361 elif p1good:
1362 pass
1363 else:
1364 # Neither is good, try against prev to hopefully save us
1365 # a fulltext.
1366 d = builddelta(prev)
1367 else:
1368 # Pick whichever parent is closer to us (to minimize the
1369 # chance of having to build a fulltext). Since
1370 # nullrev == -1, any non-merge commit will always pick p1r.
1371 drev = p2r if p2r > p1r else p1r
1372 d = builddelta(drev)
1373 # If the chosen delta will result in us making a full text,
1374 # give it one last try against prev.
1375 if drev != prev and not self._isgooddelta(d, textlen):
1376 d = builddelta(prev)
1355 1377 else:
1356 1378 d = builddelta(prev)
1357 1379 dist, l, data, base, chainbase, chainlen, compresseddeltalen = d
1358 1380
1359 1381 if not self._isgooddelta(d, textlen):
1360 1382 text = buildtext()
1361 1383 data = self.compress(text)
1362 1384 l = len(data[1]) + len(data[0])
1363 1385 base = chainbase = curr
1364 1386
1365 1387 e = (offset_type(offset, flags), l, textlen,
1366 1388 base, link, p1r, p2r, node)
1367 1389 self.index.insert(-1, e)
1368 1390 self.nodemap[node] = curr
1369 1391
1370 1392 entry = self._io.packentry(e, self.node, self.version, curr)
1371 1393 self._writeentry(transaction, ifh, dfh, entry, data, link, offset)
1372 1394
1373 1395 if type(text) == str: # only accept immutable objects
1374 1396 self._cache = (node, curr, text)
1375 1397 self._basecache = (curr, chainbase)
1376 1398 return node
1377 1399
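Condensing the delta-base selection above (assuming generaldelta and a non-null prev): with aggressivemergedeltas both parents are tried and the smaller acceptable delta wins, otherwise only the closer parent is tried with one retry against prev; whatever comes back must still pass _isgooddelta() or a full snapshot is stored. A standalone sketch of that policy, with builddelta and isgood standing in for the closure and method above:

    nullrev = -1

    def choosedelta(p1r, p2r, prev, aggressive, builddelta, isgood):
        if aggressive and p2r != nullrev:
            d1, d2 = builddelta(p1r), builddelta(p2r)
            good1, good2 = isgood(d1), isgood(d2)
            if good1 and good2:
                return d1 if d1[1] <= d2[1] else d2   # d[1] is the compressed length
            if good2:
                return d2
            if good1:
                return d1
            return builddelta(prev)                   # last try before a fulltext
        # default: delta against the closer parent, then one retry against prev
        drev = p2r if p2r > p1r else p1r
        d = builddelta(drev)
        if drev != prev and not isgood(d):
            d = builddelta(prev)
        return d
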
1378 1400 def _writeentry(self, transaction, ifh, dfh, entry, data, link, offset):
1379 1401 curr = len(self) - 1
1380 1402 if not self._inline:
1381 1403 transaction.add(self.datafile, offset)
1382 1404 transaction.add(self.indexfile, curr * len(entry))
1383 1405 if data[0]:
1384 1406 dfh.write(data[0])
1385 1407 dfh.write(data[1])
1386 1408 dfh.flush()
1387 1409 ifh.write(entry)
1388 1410 else:
1389 1411 offset += curr * self._io.size
1390 1412 transaction.add(self.indexfile, offset, curr)
1391 1413 ifh.write(entry)
1392 1414 ifh.write(data[0])
1393 1415 ifh.write(data[1])
1394 1416 self.checkinlinesize(transaction, ifh)
1395 1417
1396 1418 def addgroup(self, bundle, linkmapper, transaction, addrevisioncb=None):
1397 1419 """
1398 1420 add a delta group
1399 1421
1400 1422 given a set of deltas, add them to the revision log. the
1401 1423 first delta is against its parent, which should be in our
1402 1424 log, the rest are against the previous delta.
1403 1425
1404 1426 If ``addrevisioncb`` is defined, it will be called with arguments of
1405 1427 this revlog and the node that was added.
1406 1428 """
1407 1429
1408 1430 # track the base of the current delta log
1409 1431 content = []
1410 1432 node = None
1411 1433
1412 1434 r = len(self)
1413 1435 end = 0
1414 1436 if r:
1415 1437 end = self.end(r - 1)
1416 1438 ifh = self.opener(self.indexfile, "a+")
1417 1439 isize = r * self._io.size
1418 1440 if self._inline:
1419 1441 transaction.add(self.indexfile, end + isize, r)
1420 1442 dfh = None
1421 1443 else:
1422 1444 transaction.add(self.indexfile, isize, r)
1423 1445 transaction.add(self.datafile, end)
1424 1446 dfh = self.opener(self.datafile, "a")
1425 1447 def flush():
1426 1448 if dfh:
1427 1449 dfh.flush()
1428 1450 ifh.flush()
1429 1451 try:
1430 1452 # loop through our set of deltas
1431 1453 chain = None
1432 1454 while True:
1433 1455 chunkdata = bundle.deltachunk(chain)
1434 1456 if not chunkdata:
1435 1457 break
1436 1458 node = chunkdata['node']
1437 1459 p1 = chunkdata['p1']
1438 1460 p2 = chunkdata['p2']
1439 1461 cs = chunkdata['cs']
1440 1462 deltabase = chunkdata['deltabase']
1441 1463 delta = chunkdata['delta']
1442 1464
1443 1465 content.append(node)
1444 1466
1445 1467 link = linkmapper(cs)
1446 1468 if node in self.nodemap:
1447 1469 # this can happen if two branches make the same change
1448 1470 chain = node
1449 1471 continue
1450 1472
1451 1473 for p in (p1, p2):
1452 1474 if p not in self.nodemap:
1453 1475 raise LookupError(p, self.indexfile,
1454 1476 _('unknown parent'))
1455 1477
1456 1478 if deltabase not in self.nodemap:
1457 1479 raise LookupError(deltabase, self.indexfile,
1458 1480 _('unknown delta base'))
1459 1481
1460 1482 baserev = self.rev(deltabase)
1461 1483
1462 1484 if baserev != nullrev and self.iscensored(baserev):
1463 1485 # if base is censored, delta must be full replacement in a
1464 1486 # single patch operation
1465 1487 hlen = struct.calcsize(">lll")
1466 1488 oldlen = self.rawsize(baserev)
1467 1489 newlen = len(delta) - hlen
1468 1490 if delta[:hlen] != mdiff.replacediffheader(oldlen, newlen):
1469 1491 raise error.CensoredBaseError(self.indexfile,
1470 1492 self.node(baserev))
1471 1493
1472 1494 flags = REVIDX_DEFAULT_FLAGS
1473 1495 if self._peek_iscensored(baserev, delta, flush):
1474 1496 flags |= REVIDX_ISCENSORED
1475 1497
1476 1498 chain = self._addrevision(node, None, transaction, link,
1477 1499 p1, p2, flags, (baserev, delta),
1478 1500 ifh, dfh)
1479 1501
1480 1502 if addrevisioncb:
1481 1503 # Data for added revision can't be read unless flushed
1482 1504                     # because _loadchunk always opens a new file handle and
1483 1505 # there is no guarantee data was actually written yet.
1484 1506 flush()
1485 1507 addrevisioncb(self, chain)
1486 1508
1487 1509 if not dfh and not self._inline:
1488 1510 # addrevision switched from inline to conventional
1489 1511 # reopen the index
1490 1512 ifh.close()
1491 1513 dfh = self.opener(self.datafile, "a")
1492 1514 ifh = self.opener(self.indexfile, "a")
1493 1515 finally:
1494 1516 if dfh:
1495 1517 dfh.close()
1496 1518 ifh.close()
1497 1519
1498 1520 return content
1499 1521
1500 1522 def iscensored(self, rev):
1501 1523 """Check if a file revision is censored."""
1502 1524 return False
1503 1525
1504 1526 def _peek_iscensored(self, baserev, delta, flush):
1505 1527 """Quickly check if a delta produces a censored revision."""
1506 1528 return False
1507 1529
1508 1530 def getstrippoint(self, minlink):
1509 1531 """find the minimum rev that must be stripped to strip the linkrev
1510 1532
1511 1533 Returns a tuple containing the minimum rev and a set of all revs that
1512 1534 have linkrevs that will be broken by this strip.
1513 1535 """
1514 1536 brokenrevs = set()
1515 1537 strippoint = len(self)
1516 1538
1517 1539 heads = {}
1518 1540 futurelargelinkrevs = set()
1519 1541 for head in self.headrevs():
1520 1542 headlinkrev = self.linkrev(head)
1521 1543 heads[head] = headlinkrev
1522 1544 if headlinkrev >= minlink:
1523 1545 futurelargelinkrevs.add(headlinkrev)
1524 1546
1525 1547 # This algorithm involves walking down the rev graph, starting at the
1526 1548 # heads. Since the revs are topologically sorted according to linkrev,
1527 1549 # once all head linkrevs are below the minlink, we know there are
1528 1550 # no more revs that could have a linkrev greater than minlink.
1529 1551 # So we can stop walking.
1530 1552 while futurelargelinkrevs:
1531 1553 strippoint -= 1
1532 1554 linkrev = heads.pop(strippoint)
1533 1555
1534 1556 if linkrev < minlink:
1535 1557 brokenrevs.add(strippoint)
1536 1558 else:
1537 1559 futurelargelinkrevs.remove(linkrev)
1538 1560
1539 1561 for p in self.parentrevs(strippoint):
1540 1562 if p != nullrev:
1541 1563 plinkrev = self.linkrev(p)
1542 1564 heads[p] = plinkrev
1543 1565 if plinkrev >= minlink:
1544 1566 futurelargelinkrevs.add(plinkrev)
1545 1567
1546 1568 return strippoint, brokenrevs
1547 1569
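The walk described in the comments above is easy to exercise on a toy graph; a standalone model, assuming a linear five-revision log with linkrevs 0..4 (plain lists stand in for headrevs(), parentrevs() and linkrev()):

    def toystrippoint(linkrevs, parentrevs, headrevs, minlink):
        nullrev = -1
        brokenrevs, strippoint = set(), len(linkrevs)
        heads = dict((h, linkrevs[h]) for h in headrevs)
        futurelarge = set(lr for lr in heads.values() if lr >= minlink)
        while futurelarge:
            strippoint -= 1
            linkrev = heads.pop(strippoint)
            if linkrev < minlink:
                brokenrevs.add(strippoint)
            else:
                futurelarge.remove(linkrev)
            for p in parentrevs[strippoint]:
                if p != nullrev:
                    heads[p] = linkrevs[p]
                    if linkrevs[p] >= minlink:
                        futurelarge.add(linkrevs[p])
        return strippoint, brokenrevs

    print toystrippoint([0, 1, 2, 3, 4],
                        [(-1, -1), (0, -1), (1, -1), (2, -1), (3, -1)],
                        [4], 3)   # -> (3, set()): truncate at rev 3, nothing broken
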
1548 1570 def strip(self, minlink, transaction):
1549 1571 """truncate the revlog on the first revision with a linkrev >= minlink
1550 1572
1551 1573 This function is called when we're stripping revision minlink and
1552 1574 its descendants from the repository.
1553 1575
1554 1576 We have to remove all revisions with linkrev >= minlink, because
1555 1577 the equivalent changelog revisions will be renumbered after the
1556 1578 strip.
1557 1579
1558 1580 So we truncate the revlog on the first of these revisions, and
1559 1581 trust that the caller has saved the revisions that shouldn't be
1560 1582 removed and that it'll re-add them after this truncation.
1561 1583 """
1562 1584 if len(self) == 0:
1563 1585 return
1564 1586
1565 1587 rev, _ = self.getstrippoint(minlink)
1566 1588 if rev == len(self):
1567 1589 return
1568 1590
1569 1591 # first truncate the files on disk
1570 1592 end = self.start(rev)
1571 1593 if not self._inline:
1572 1594 transaction.add(self.datafile, end)
1573 1595 end = rev * self._io.size
1574 1596 else:
1575 1597 end += rev * self._io.size
1576 1598
1577 1599 transaction.add(self.indexfile, end)
1578 1600
1579 1601 # then reset internal state in memory to forget those revisions
1580 1602 self._cache = None
1581 1603 self._chaininfocache = {}
1582 1604 self._chunkclear()
1583 1605 for x in xrange(rev, len(self)):
1584 1606 del self.nodemap[self.node(x)]
1585 1607
1586 1608 del self.index[rev:-1]
1587 1609
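The truncation offsets computed above are easy to sanity-check by hand; a worked example with made-up numbers, assuming the 64-byte v1 (RevlogNG) index entry size:

    # Pretend we strip at rev 10 and self.start(10) returned 4096.
    entrysize = 64                      # assumed v1 index entry size
    rev, datastart = 10, 4096
    print datastart                     # non-inline: truncate the .d file here
    print rev * entrysize               # non-inline: truncate the .i file at 640
    print datastart + rev * entrysize   # inline: truncate the single .i file at 4736
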
1588 1610 def checksize(self):
1589 1611 expected = 0
1590 1612 if len(self):
1591 1613 expected = max(0, self.end(len(self) - 1))
1592 1614
1593 1615 try:
1594 1616 f = self.opener(self.datafile)
1595 1617 f.seek(0, 2)
1596 1618 actual = f.tell()
1597 1619 f.close()
1598 1620 dd = actual - expected
1599 1621 except IOError as inst:
1600 1622 if inst.errno != errno.ENOENT:
1601 1623 raise
1602 1624 dd = 0
1603 1625
1604 1626 try:
1605 1627 f = self.opener(self.indexfile)
1606 1628 f.seek(0, 2)
1607 1629 actual = f.tell()
1608 1630 f.close()
1609 1631 s = self._io.size
1610 1632 i = max(0, actual // s)
1611 1633 di = actual - (i * s)
1612 1634 if self._inline:
1613 1635 databytes = 0
1614 1636 for r in self:
1615 1637 databytes += max(0, self.length(r))
1616 1638 dd = 0
1617 1639 di = actual - len(self) * s - databytes
1618 1640 except IOError as inst:
1619 1641 if inst.errno != errno.ENOENT:
1620 1642 raise
1621 1643 di = 0
1622 1644
1623 1645 return (dd, di)
1624 1646
1625 1647 def files(self):
1626 1648 res = [self.indexfile]
1627 1649 if not self._inline:
1628 1650 res.append(self.datafile)
1629 1651 return res
@@ -1,71 +1,105 b''
1 1 Check that the size of a generaldelta revlog is not bigger than that of its
2 2 regular equivalent. The test would fail if generaldelta were a naive
3 3 implementation of parentdelta: the third manifest revision would be fully
4 4 inserted due to the big distance from its parent revision (zero).
5 5
6 6 $ hg init repo
7 7 $ cd repo
8 8 $ echo foo > foo
9 9 $ echo bar > bar
10 10 $ hg commit -q -Am boo
11 11 $ hg clone --pull . ../gdrepo -q --config format.generaldelta=yes
12 12 $ for r in 1 2 3; do
13 13 > echo $r > foo
14 14 > hg commit -q -m $r
15 15 > hg up -q -r 0
16 16 > hg pull . -q -r $r -R ../gdrepo
17 17 > done
18 18
19 19 $ cd ..
20 20 >>> import os
21 21 >>> regsize = os.stat("repo/.hg/store/00manifest.i").st_size
22 22 >>> gdsize = os.stat("gdrepo/.hg/store/00manifest.i").st_size
23 23 >>> if regsize < gdsize:
24 24 ... print 'generaldelta increased size of manifest'
25 25
26 26 Verify that rev reordering doesn't create invalid bundles (issue4462).
27 27 This requires a commit tree that, when pulled, will reorder manifest revs such
28 28 that the second manifest to create a file rev is ordered before the first
29 29 manifest to create that file rev. We also need to do a partial pull to ensure
30 30 reordering happens. At the end we verify that the linkrev points at the earliest
31 31 commit.
32 32
33 33 $ hg init server --config format.generaldelta=True
34 34 $ cd server
35 35 $ touch a
36 36 $ hg commit -Aqm a
37 37 $ echo x > x
38 38 $ echo y > y
39 39 $ hg commit -Aqm xy
40 40 $ hg up -q '.^'
41 41 $ echo x > x
42 42 $ echo z > z
43 43 $ hg commit -Aqm xz
44 44 $ hg up -q 1
45 45 $ echo b > b
46 46 $ hg commit -Aqm b
47 47 $ hg merge -q 2
48 48 $ hg commit -Aqm merge
49 49 $ echo c > c
50 50 $ hg commit -Aqm c
51 51 $ hg log -G -T '{rev} {shortest(node)} {desc}'
52 52 @ 5 ebb8 c
53 53 |
54 54 o 4 baf7 merge
55 55 |\
56 56 | o 3 a129 b
57 57 | |
58 58 o | 2 958c xz
59 59 | |
60 60 | o 1 f00c xy
61 61 |/
62 62 o 0 3903 a
63 63
64 64 $ cd ..
65 65 $ hg init client
66 66 $ cd client
67 67 $ hg pull -q ../server -r 4
68 68 $ hg debugindex x
69 69 rev offset length base linkrev nodeid p1 p2
70 70 0 0 3 0 1 1406e7411862 000000000000 000000000000
71 71
72 $ cd ..
73
74 Test format.aggressivemergedeltas
75
76 $ hg init --config format.generaldelta=1 aggressive
77 $ cd aggressive
78 $ touch a b c d e
79 $ hg commit -Aqm side1
80 $ hg up -q null
81 $ touch x y
82 $ hg commit -Aqm side2
83
84 - Verify non-aggressive merge uses p1 (commit 1) as delta parent
85 $ hg merge -q 0
86 $ hg commit -q -m merge
87 $ hg debugindex -m
88 rev offset length delta linkrev nodeid p1 p2
89 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
90 1 59 59 -1 1 315c023f341d 000000000000 000000000000
91 2 118 65 1 2 2ab389a983eb 315c023f341d 8dde941edb6e
92
93 $ hg strip -q -r . --config extensions.strip=
94
95 - Verify aggressive merge uses p2 (commit 0) as delta parent
96 $ hg up -q -C 1
97 $ hg merge -q 0
98 $ hg commit -q -m merge --config format.aggressivemergedeltas=True
99 $ hg debugindex -m
100 rev offset length delta linkrev nodeid p1 p2
101 0 0 59 -1 0 8dde941edb6e 000000000000 000000000000
102 1 59 59 -1 1 315c023f341d 000000000000 000000000000
103 2 118 62 0 2 2ab389a983eb 315c023f341d 8dde941edb6e
104
105 $ cd ..
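
For reference, the behaviour the test toggles per command with --config can also be set persistently in a repository's hgrc; a minimal sketch using the option names exercised above:

  [format]
  generaldelta = True
  aggressivemergedeltas = True

Note that format.generaldelta takes effect when a repository is created (the test passes it to hg init and hg clone), so enabling it later does not rewrite revlogs that already exist.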