##// END OF EJS Templates
revlog: linearize created changegroups in generaldelta revlogs...
Sune Foldager -
r14365:a8e3931e default
parent child Browse files
Show More
@@ -1,1975 +1,1986 b''
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from node import bin, hex, nullid, nullrev, short
9 9 from i18n import _
10 10 import repo, changegroup, subrepo, discovery, pushkey
11 11 import changelog, dirstate, filelog, manifest, context, bookmarks
12 12 import lock, transaction, store, encoding
13 13 import scmutil, util, extensions, hook, error
14 14 import match as matchmod
15 15 import merge as mergemod
16 16 import tags as tagsmod
17 17 from lock import release
18 18 import weakref, errno, os, time, inspect
19 19 propertycache = util.propertycache
20 20
class localrepository(repo.repository):
    # Wire-protocol capabilities this repository advertises to peers.
    capabilities = set(('lookup', 'changegroupsubset', 'branchmap', 'pushkey',
                        'known', 'getbundle'))
    # Revlog storage formats this class can read and write.
    supportedformats = set(('revlogv1', 'generaldelta'))
    # All .hg/requires entries understood, including store-layout variants.
    supported = supportedformats | set(('store', 'fncache', 'shared',
                                        'dotencode'))
    def __init__(self, baseui, path=None, create=False):
        """Open (or, with create=True, initialize) the repository at path.

        baseui is copied so per-repository configuration (.hg/hgrc) does
        not leak back into the caller's ui.  Raises error.RepoError when
        the repository is missing (or already exists while creating) and
        error.RequirementError when .hg/requires lists an unsupported
        format.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(util.expandpath(path))
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.auditor = scmutil.pathauditor(self.root, self._checknested)
        self.opener = scmutil.opener(self.path)
        self.wopener = scmutil.opener(self.root)
        self.baseui = baseui
        self.ui = baseui.copy()

        try:
            # load per-repository config and any extensions it enables
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    util.makedirs(path)
                util.makedir(self.path, notindexed=True)
                requirements = ["revlogv1"]
                if self.ui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if self.ui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                        if self.ui.configbool('format', 'dotencode', True):
                            requirements.append('dotencode')
                    # create an invalid changelog
                    self.opener.append(
                        "00changelog.i",
                        '\0\0\0\2' # represents revlogv2
                        ' dummy changelog to prevent using the old repo layout'
                    )
                if self.ui.configbool('format', 'generaldelta', False):
                    requirements.append("generaldelta")
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = set()
            try:
                requirements = set(self.opener.read("requires").splitlines())
            except IOError, inst:
                # a missing requires file means an old-style repository;
                # any other error is fatal
                if inst.errno != errno.ENOENT:
                    raise
            for r in requirements - self.supported:
                raise error.RequirementError(
                    _("requirement '%s' not supported") % r)

        # honor a share (shared store) indirection if one is configured
        self.sharedpath = self.path
        try:
            s = os.path.realpath(self.opener.read("sharedpath"))
            if not os.path.exists(s):
                raise error.RepoError(
                    _('.hg/sharedpath points to nonexistent directory %s') % s)
            self.sharedpath = s
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise

        self.store = store.store(requirements, self.sharedpath, scmutil.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode
        self._applyrequirements(requirements)
        if create:
            self._writerequirements()

        # These two define the set of tags for this repository. _tags
        # maps tag name to node; _tagtypes maps tag name to 'global' or
        # 'local'. (Global tags are defined by .hgtags across all
        # heads, and local tags are defined in .hg/localtags.) They
        # constitute the in-memory cache of tags.
        self._tags = None
        self._tagtypes = None

        # lazily-built caches; None means "not computed yet"
        self._branchcache = None
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
117 117 def _applyrequirements(self, requirements):
118 118 self.requirements = requirements
119 119 openerreqs = set(('revlogv1', 'generaldelta'))
120 120 self.sopener.options = dict((r, 1) for r in requirements
121 121 if r in openerreqs)
122 122
123 123 def _writerequirements(self):
124 124 reqfile = self.opener("requires", "w")
125 125 for r in self.requirements:
126 126 reqfile.write("%s\n" % r)
127 127 reqfile.close()
128 128
    def _checknested(self, path):
        """Determine if path is a legal nested repository.

        path is an absolute filesystem path; returns True only when it
        falls under a subrepository recorded in the working copy's
        substate (recursing into nested subrepos as needed).
        """
        if not path.startswith(self.root):
            return False
        # path relative to the repository root
        subpath = path[len(self.root) + 1:]

        # XXX: Checking against the current working copy is wrong in
        # the sense that it can reject things like
        #
        #   $ hg cat -r 10 sub/x.txt
        #
        # if sub/ is no longer a subrepository in the working copy
        # parent revision.
        #
        # However, it can of course also allow things that would have
        # been rejected before, such as the above cat command if sub/
        # is a subrepository now, but was a normal directory before.
        # The old path auditor would have rejected by mistake since it
        # panics when it sees sub/.hg/.
        #
        # All in all, checking against the working copy seems sensible
        # since we want to prevent access to nested repositories on
        # the filesystem *now*.
        ctx = self[None]
        parts = util.splitpath(subpath)
        while parts:
            prefix = os.sep.join(parts)
            if prefix in ctx.substate:
                if prefix == subpath:
                    # path is a subrepository itself
                    return True
                else:
                    # path is inside a subrepository; let it decide
                    sub = ctx.sub(prefix)
                    return sub.checknested(subpath[len(prefix) + 1:])
            else:
                # try the next shorter prefix
                parts.pop()
        return False
165 165
    @util.propertycache
    def _bookmarks(self):
        # lazily-read mapping of bookmark name -> node
        return bookmarks.read(self)

    @util.propertycache
    def _bookmarkcurrent(self):
        # the currently active bookmark, as reported by
        # bookmarks.readcurrent(self)
        return bookmarks.readcurrent(self)
173 173
    @propertycache
    def changelog(self):
        # created lazily on first access; pick up pending changelog data
        # written by an in-progress transaction when HG_PENDING points
        # inside this repository (used so hooks can see pending changes)
        c = changelog.changelog(self.sopener)
        if 'HG_PENDING' in os.environ:
            p = os.environ['HG_PENDING']
            if p.startswith(self.root):
                c.readpending('00changelog.i.a')
        return c

    @propertycache
    def manifest(self):
        # created lazily on first access
        return manifest.manifest(self.sopener)
186 186
    @propertycache
    def dirstate(self):
        """The working directory state, created lazily.

        Its parent validator maps parent nodes unknown to the changelog
        to nullid (warning once) instead of aborting.
        """
        warned = [0]
        def validate(node):
            try:
                self.changelog.rev(node)
                return node
            except error.LookupError:
                if not warned[0]:
                    warned[0] = True
                    self.ui.warn(_("warning: ignoring unknown"
                                   " working parent %s!\n") % short(node))
                return nullid

        return dirstate.dirstate(self.opener, self.ui, self.root, validate)
202 202
    def __getitem__(self, changeid):
        """Return a context: workingctx for None, else a changectx."""
        if changeid is None:
            return context.workingctx(self)
        return context.changectx(self, changeid)

    def __contains__(self, changeid):
        # a changeid is "in" the repository if lookup() can resolve it
        try:
            return bool(self.lookup(changeid))
        except error.RepoLookupError:
            return False

    def __nonzero__(self):
        # a repository object is always truthy, even when empty
        return True

    def __len__(self):
        # number of revisions in the changelog
        return len(self.changelog)

    def __iter__(self):
        # iterate over revision numbers, oldest first
        for i in xrange(len(self)):
            yield i

    def url(self):
        """Return a 'file:' URL for the repository root."""
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        """Run the configured hooks for *name*; see hook.hook."""
        return hook.hook(self.ui, self, name, throw, **args)
229 229
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'

    def _tag(self, names, node, message, local, user, date, extra={}):
        """Record tags *names* for *node* without any safety checks.

        For local tags, append to .hg/localtags and return None.  For
        global tags, append to .hgtags and commit it, returning the new
        changeset node.  Runs the 'pretag' and 'tag' hooks.

        NOTE(review): extra={} is a shared mutable default; it is only
        passed through to commit() here, but callers should not rely on
        it staying empty across calls.
        """
        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        branches = self.branchmap()
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)
            if name in branches:
                self.ui.warn(_("warning: tag %s conflicts with existing"
                               " branch name\n") % name)

        def writetags(fp, names, munge, prevtags):
            # append tag lines to fp, preceding each new value with the
            # old one so later readers see the full history of the tag
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagtypes and name in self._tagtypes:
                    old = self._tags.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        try:
            fp = self.wfile('.hgtags', 'rb+')
        except IOError:
            fp = self.wfile('.hgtags', 'ab')
        else:
            prevtags = fp.read()

        # committed tags are stored in UTF-8
        writetags(fp, names, encoding.fromlocal, prevtags)

        fp.close()

        if '.hgtags' not in self.dirstate:
            self[None].add(['.hgtags'])

        # commit only the .hgtags change
        m = matchmod.exact(self.root, '', ['.hgtags'])
        tagnode = self.commit(message, user, date, extra=extra, match=m)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
299 299
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        if not local:
            # refuse to tag when .hgtags appears in any of the first five
            # status categories; the commit below would otherwise pick up
            # unrelated .hgtags changes
            for x in self.status()[:5]:
                if '.hgtags' in x:
                    raise util.Abort(_('working copy of .hgtags is changed '
                                       '(please commit .hgtags manually)'))

        self.tags() # instantiate the cache
        self._tag(names, node, message, local, user, date)
329 329
330 330 def tags(self):
331 331 '''return a mapping of tag to node'''
332 332 if self._tags is None:
333 333 (self._tags, self._tagtypes) = self._findtags()
334 334
335 335 return self._tags
336 336
    def _findtags(self):
        '''Do the hard work of finding tags.  Return a pair of dicts
        (tags, tagtypes) where tags maps tag name to node, and tagtypes
        maps tag name to a string like \'global\' or \'local\'.
        Subclasses or extensions are free to add their own tags, but
        should be aware that the returned dicts will be retained for the
        duration of the localrepo object.'''

        # XXX what tagtype should subclasses/extensions use?  Currently
        # mq and bookmarks add tags, but do not set the tagtype at all.
        # Should each extension invent its own tag type?  Should there
        # be one tagtype for all such "virtual" tags?  Or is the status
        # quo fine?

        alltags = {}                    # map tag name to (node, hist)
        tagtypes = {}

        tagsmod.findglobaltags(self.ui, self, alltags, tagtypes)
        tagsmod.readlocaltags(self.ui, self, alltags, tagtypes)

        # Build the return dicts.  Have to re-encode tag names because
        # the tags module always uses UTF-8 (in order not to lose info
        # writing to the cache), but the rest of Mercurial wants them in
        # local encoding.
        tags = {}
        for (name, (node, hist)) in alltags.iteritems():
            if node != nullid:
                try:
                    # ignore tags to unknown nodes
                    self.changelog.lookup(node)
                    tags[encoding.tolocal(name)] = node
                except error.LookupError:
                    pass
        # 'tip' is always defined, even in an empty repository
        tags['tip'] = self.changelog.tip()
        tagtypes = dict([(encoding.tolocal(name), value)
                         for (name, value) in tagtypes.iteritems()])
        return (tags, tagtypes)
374 374
375 375 def tagtype(self, tagname):
376 376 '''
377 377 return the type of the given tag. result can be:
378 378
379 379 'local' : a local tag
380 380 'global' : a global tag
381 381 None : tag does not exist
382 382 '''
383 383
384 384 self.tags()
385 385
386 386 return self._tagtypes.get(tagname)
387 387
388 388 def tagslist(self):
389 389 '''return a list of tags ordered by revision'''
390 390 l = []
391 391 for t, n in self.tags().iteritems():
392 392 r = self.changelog.rev(n)
393 393 l.append((r, t, n))
394 394 return [(t, n) for r, t, n in sorted(l)]
395 395
396 396 def nodetags(self, node):
397 397 '''return the tags associated with a node'''
398 398 if not self.nodetagscache:
399 399 self.nodetagscache = {}
400 400 for t, n in self.tags().iteritems():
401 401 self.nodetagscache.setdefault(n, []).append(t)
402 402 for tags in self.nodetagscache.itervalues():
403 403 tags.sort()
404 404 return self.nodetagscache.get(node, [])
405 405
406 406 def nodebookmarks(self, node):
407 407 marks = []
408 408 for bookmark, n in self._bookmarks.iteritems():
409 409 if n == node:
410 410 marks.append(bookmark)
411 411 return sorted(marks)
412 412
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the branch-head map 'partial' (valid up to local rev
        # 'lrev') up to date with the current tip, writing the refreshed
        # cache to disk when anything changed.
        tiprev = len(self) - 1
        if lrev != tiprev:
            ctxgen = (self[r] for r in xrange(lrev + 1, tiprev + 1))
            self._updatebranchcache(partial, ctxgen)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
422 422
    def updatebranchcache(self):
        """Ensure self._branchcache reflects the current changelog tip."""
        tip = self.changelog.tip()
        if self._branchcache is not None and self._branchcachetip == tip:
            # cache already current
            return self._branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory cache (first call, or the old tip was
            # stripped): start from the on-disk cache
            partial, last, lrev = self._readbranchcache()
        else:
            # incremental update from the previously cached tip
            lrev = self.changelog.rev(oldtip)
            partial = self._branchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._branchcache = partial

    def branchmap(self):
        '''returns a dictionary {branch: [branchheads]}'''
        self.updatebranchcache()
        return self._branchcache
444 444
445 445 def branchtags(self):
446 446 '''return a dict where branch names map to the tipmost head of
447 447 the branch, open heads come before closed'''
448 448 bt = {}
449 449 for bn, heads in self.branchmap().iteritems():
450 450 tip = heads[-1]
451 451 for h in reversed(heads):
452 452 if 'close' not in self.changelog.read(h)[5]:
453 453 tip = h
454 454 break
455 455 bt[bn] = tip
456 456 return bt
457 457
    def _readbranchcache(self):
        """Read the on-disk branch-head cache.

        Returns (partial, last, lrev): partial maps branch name to a
        list of head nodes; last/lrev are the tip node and local rev the
        cache was valid for.  Any read or parse problem yields an empty,
        invalid cache ({}, nullid, nullrev).
        """
        partial = {}
        try:
            f = self.opener("cache/branchheads")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l:
                    continue
                node, label = l.split(" ", 1)
                label = encoding.tolocal(label.strip())
                partial.setdefault(label, []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # corrupt cache: discard it; it will be rebuilt from scratch
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
486 486
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically write the branch-head cache to disk.

        Write errors are deliberately ignored: the cache can always be
        rebuilt from the changelog.
        """
        try:
            f = self.opener("cache/branchheads", "w", atomictemp=True)
            # header line, then one "<node hex> <branch>" line per head
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), encoding.fromlocal(label)))
            f.rename()
        except (IOError, OSError):
            pass
497 497
    def _updatebranchcache(self, partial, ctxgen):
        """Update branch-head map *partial* in place with the change
        contexts from iterator *ctxgen*."""
        # collect new branch entries
        newbranches = {}
        for c in ctxgen:
            newbranches.setdefault(c.branch(), []).append(c.node())
        # if older branchheads are reachable from new ones, they aren't
        # really branchheads. Note checking parents is insufficient:
        # 1 (branch a) -> 2 (branch b) -> 3 (branch a)
        for branch, newnodes in newbranches.iteritems():
            bheads = partial.setdefault(branch, [])
            bheads.extend(newnodes)
            if len(bheads) <= 1:
                continue
            # keep heads sorted by revision so bheads[0] is the oldest
            bheads = sorted(bheads, key=lambda x: self[x].rev())
            # starting from tip means fewer passes over reachable
            while newnodes:
                latest = newnodes.pop()
                if latest not in bheads:
                    continue
                minbhrev = self[bheads[0]].node()
                reachable = self.changelog.reachable(latest, minbhrev)
                reachable.remove(latest)
                if reachable:
                    # drop heads that 'latest' can reach: not real heads
                    bheads = [b for b in bheads if b not in reachable]
            partial[branch] = bheads
523 523
    def lookup(self, key):
        """Resolve *key* to a changelog node.

        key may be a revision number, '.', 'null', 'tip', a full or
        partial node, a bookmark, a tag, or a branch name (tried in that
        order).  Raises error.Abort when key matches an unknown dirstate
        parent, and error.RepoLookupError otherwise when unresolvable.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.p1()
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self._bookmarks:
            return self._bookmarks[key]
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n

        # can't find key, check if it might have come from damaged dirstate
        if key in self.dirstate.parents():
            raise error.Abort(_("working directory has unknown parent '%s'!")
                              % short(key))
        try:
            # show binary nodes in readable hex form in the error message
            if len(key) == 20:
                key = hex(key)
        except TypeError:
            pass
        raise error.RepoLookupError(_("unknown revision '%s'") % key)
556 556
557 557 def lookupbranch(self, key, remote=None):
558 558 repo = remote or self
559 559 if key in repo.branchmap():
560 560 return key
561 561
562 562 repo = (remote and remote.local()) and remote or self
563 563 return repo[key].branch()
564 564
565 565 def known(self, nodes):
566 566 nm = self.changelog.nodemap
567 567 return [(n in nm) for n in nodes]
568 568
    def local(self):
        # this is a local (filesystem-backed) repository, not a peer proxy
        return True

    def join(self, f):
        """Return f joined under the .hg directory."""
        return os.path.join(self.path, f)

    def wjoin(self, f):
        """Return f joined under the working directory root."""
        return os.path.join(self.root, f)

    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid):
        # alias for self[changeid]
        return self[changeid]

    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
        fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        """Open file f from the working directory."""
        return self.wopener(f, mode)

    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
606 606
607 607 def _loadfilter(self, filter):
608 608 if filter not in self.filterpats:
609 609 l = []
610 610 for pat, cmd in self.ui.configitems(filter):
611 611 if cmd == '!':
612 612 continue
613 613 mf = matchmod.match(self.root, '', [pat])
614 614 fn = None
615 615 params = cmd
616 616 for name, filterfn in self._datafilters.iteritems():
617 617 if cmd.startswith(name):
618 618 fn = filterfn
619 619 params = cmd[len(name):].lstrip()
620 620 break
621 621 if not fn:
622 622 fn = lambda s, c, **kwargs: util.filter(s, c)
623 623 # Wrap old filters not supporting keyword arguments
624 624 if not inspect.getargspec(fn)[2]:
625 625 oldfn = fn
626 626 fn = lambda s, c, **kwargs: oldfn(s, c)
627 627 l.append((mf, fn, params))
628 628 self.filterpats[filter] = l
629 629 return self.filterpats[filter]
630 630
631 631 def _filter(self, filterpats, filename, data):
632 632 for mf, fn, cmd in filterpats:
633 633 if mf(filename):
634 634 self.ui.debug("filtering %s through %s\n" % (filename, cmd))
635 635 data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
636 636 break
637 637
638 638 return data
639 639
    @propertycache
    def _encodefilterpats(self):
        # filters applied when reading file data out of the working dir
        return self._loadfilter('encode')

    @propertycache
    def _decodefilterpats(self):
        # filters applied when writing file data into the working dir
        return self._loadfilter('decode')

    def adddatafilter(self, name, filter):
        """Register a named data filter usable from encode/decode config."""
        self._datafilters[name] = filter
650 650
651 651 def wread(self, filename):
652 652 if self._link(filename):
653 653 data = os.readlink(self.wjoin(filename))
654 654 else:
655 655 data = self.wopener.read(filename)
656 656 return self._filter(self._encodefilterpats, filename, data)
657 657
    def wwrite(self, filename, data, flags):
        """Write *data* to the working directory, applying decode filters
        and honoring the 'l' (symlink) and 'x' (executable) flags."""
        data = self._filter(self._decodefilterpats, filename, data)
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener.write(filename, data)
            if 'x' in flags:
                util.setflags(self.wjoin(filename), False, True)

    def wwritedata(self, filename, data):
        """Return *data* run through the decode filters, without writing."""
        return self._filter(self._decodefilterpats, filename, data)
669 669
    def transaction(self, desc):
        """Open (or nest into) a store transaction described by *desc*.

        Returns the transaction object; a weak reference is kept so a
        running transaction can be joined by later callers.
        """
        tr = self._transref and self._transref() or None
        if tr and tr.running():
            # join the transaction already in progress
            return tr.nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise error.RepoError(
                _("abandoned transaction found - run hg recover"))

        # snapshot state files; on success they are renamed to undo.*
        journalfiles = self._writejournal(desc)
        renames = [(x, undoname(x)) for x in journalfiles]

        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self.store.createmode)
        self._transref = weakref.ref(tr)
        return tr
689 689
    def _writejournal(self, desc):
        """Snapshot the state files needed to roll back a transaction.

        Returns the tuple of journal file paths that the transaction
        machinery renames to undo.* after a successful commit.
        """
        # save dirstate for rollback
        try:
            ds = self.opener.read("dirstate")
        except IOError:
            # no dirstate yet (fresh repository)
            ds = ""
        self.opener.write("journal.dirstate", ds)
        self.opener.write("journal.branch",
                          encoding.fromlocal(self.dirstate.branch()))
        # record the pre-transaction revision count and a description
        self.opener.write("journal.desc",
                          "%d\n%s\n" % (len(self), desc))

        # the bookmarks file may not exist yet
        bkname = self.join('bookmarks')
        if os.path.exists(bkname):
            util.copyfile(bkname, self.join('journal.bookmarks'))
        else:
            self.opener.write('journal.bookmarks', '')

        return (self.sjoin('journal'), self.join('journal.dirstate'),
                self.join('journal.branch'), self.join('journal.desc'),
                self.join('journal.bookmarks'))
711 711
    def recover(self):
        """Roll back an interrupted transaction, if any.

        Returns True when a journal was found and rolled back, False
        otherwise.
        """
        lock = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"),
                                     self.ui.warn)
                # drop caches that may reference rolled-back data
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            lock.release()
726 726
727 727 def rollback(self, dryrun=False):
728 728 wlock = lock = None
729 729 try:
730 730 wlock = self.wlock()
731 731 lock = self.lock()
732 732 if os.path.exists(self.sjoin("undo")):
733 733 try:
734 734 args = self.opener.read("undo.desc").splitlines()
735 735 if len(args) >= 3 and self.ui.verbose:
736 736 desc = _("repository tip rolled back to revision %s"
737 737 " (undo %s: %s)\n") % (
738 738 int(args[0]) - 1, args[1], args[2])
739 739 elif len(args) >= 2:
740 740 desc = _("repository tip rolled back to revision %s"
741 741 " (undo %s)\n") % (
742 742 int(args[0]) - 1, args[1])
743 743 except IOError:
744 744 desc = _("rolling back unknown transaction\n")
745 745 self.ui.status(desc)
746 746 if dryrun:
747 747 return
748 748 transaction.rollback(self.sopener, self.sjoin("undo"),
749 749 self.ui.warn)
750 750 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
751 751 if os.path.exists(self.join('undo.bookmarks')):
752 752 util.rename(self.join('undo.bookmarks'),
753 753 self.join('bookmarks'))
754 754 try:
755 755 branch = self.opener.read("undo.branch")
756 756 self.dirstate.setbranch(branch)
757 757 except IOError:
758 758 self.ui.warn(_("named branch could not be reset, "
759 759 "current branch is still: %s\n")
760 760 % self.dirstate.branch())
761 761 self.invalidate()
762 762 self.dirstate.invalidate()
763 763 self.destroyed()
764 764 parents = tuple([p.rev() for p in self.parents()])
765 765 if len(parents) > 1:
766 766 self.ui.status(_("working directory now based on "
767 767 "revisions %d and %d\n") % parents)
768 768 else:
769 769 self.ui.status(_("working directory now based on "
770 770 "revision %d\n") % parents)
771 771 else:
772 772 self.ui.warn(_("no rollback information available\n"))
773 773 return 1
774 774 finally:
775 775 release(lock, wlock)
776 776
    def invalidatecaches(self):
        # drop every derived in-memory cache; each is rebuilt lazily
        self._tags = None
        self._tagtypes = None
        self.nodetagscache = None
        self._branchcache = None # in UTF-8
        self._branchcachetip = None

    def invalidate(self):
        """Drop cached properties backed by on-disk state, then clear
        the derived caches."""
        for a in ("changelog", "manifest", "_bookmarks", "_bookmarkcurrent"):
            if a in self.__dict__:
                # deleting the attribute resets the propertycache
                delattr(self, a)
        self.invalidatecaches()
789 789
    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        """Acquire the lock file *lockname*.

        With wait=False a held lock propagates error.LockHeld at once;
        otherwise warn and retry with the configured ui.timeout.
        acquirefn, if given, runs after the lock is obtained.
        """
        try:
            # first attempt: fail immediately if held
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except error.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l
804 804
    def lock(self, wait=True):
        '''Lock the repository store (.hg/store) and return a weak reference
        to the lock. Use this before modifying the store (e.g. committing or
        stripping). If you are opening a transaction, get a lock as well.)'''
        # re-entrant: if we already hold the lock, bump its count
        l = self._lockref and self._lockref()
        if l is not None and l.held:
            l.lock()
            return l

        # on release flush store writes; on acquire invalidate caches
        l = self._lock(self.sjoin("lock"), wait, self.store.write,
                       self.invalidate, _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l
818 818
    def wlock(self, wait=True):
        '''Lock the non-store parts of the repository (everything under
        .hg except .hg/store) and return a weak reference to the lock.
        Use this before modifying files in .hg.'''
        # re-entrant: if we already hold the wlock, bump its count
        l = self._wlockref and self._wlockref()
        if l is not None and l.held:
            l.lock()
            return l

        # on release write the dirstate; on acquire invalidate it
        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l
833 833
    def _filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction

        fctx is the file context to commit; manifest1/manifest2 are the
        parent manifests; linkrev is the changelog rev to link the new
        filelog entry to; tr is the open transaction.  Appends fname to
        changelist when the file actually changed, and returns the node
        to record in the new manifest.
        """

        fname = fctx.path()
        text = fctx.data()
        flog = self.file(fname)
        fparent1 = manifest1.get(fname, nullid)
        fparent2 = fparent2o = manifest2.get(fname, nullid)

        meta = {}
        copy = fctx.renamed()
        if copy and copy[0] != fname:
            # Mark the new revision of this file as a copy of another
            # file.  This copy data will effectively act as a parent
            # of this new revision.  If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent.  For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4        as the merge base
            #

            cfname = copy[0]
            crev = manifest1.get(cfname)
            newfparent = fparent2

            if manifest2: # branch merge
                if fparent2 == nullid or crev is None: # copied on remote side
                    if cfname in manifest2:
                        crev = manifest2[cfname]
                        newfparent = fparent1

            # find source in nearest ancestor if we've lost track
            if not crev:
                self.ui.debug(" %s: searching for copy revision for %s\n" %
                              (fname, cfname))
                for ancestor in self[None].ancestors():
                    if cfname in ancestor:
                        crev = ancestor[cfname].filenode()
                        break

            if crev:
                self.ui.debug(" %s: copy %s:%s\n" % (fname, cfname, hex(crev)))
                meta["copy"] = cfname
                meta["copyrev"] = hex(crev)
                fparent1, fparent2 = nullid, newfparent
            else:
                self.ui.warn(_("warning: can't find ancestor for '%s' "
                               "copied from '%s'!\n") % (fname, cfname))

        elif fparent2 != nullid:
            # is one parent an ancestor of the other?
            fparentancestor = flog.ancestor(fparent1, fparent2)
            if fparentancestor == fparent1:
                fparent1, fparent2 = fparent2, nullid
            elif fparentancestor == fparent2:
                fparent2 = nullid

        # is the file changed?
        if fparent2 != nullid or flog.cmp(fparent1, text) or meta:
            changelist.append(fname)
            return flog.add(text, meta, tr, linkrev, fparent1, fparent2)

        # are just the flags changed during merge?
        if fparent1 != fparent2o and manifest1.flags(fname) != fctx.flags():
            changelist.append(fname)

        return fparent1
913 913
914 914 def commit(self, text="", user=None, date=None, match=None, force=False,
915 915 editor=False, extra={}):
916 916 """Add a new revision to current repository.
917 917
918 918 Revision information is gathered from the working directory,
919 919 match can be used to filter the committed files. If editor is
920 920 supplied, it is called to get a commit message.
921 921 """
922 922
923 923 def fail(f, msg):
924 924 raise util.Abort('%s: %s' % (f, msg))
925 925
926 926 if not match:
927 927 match = matchmod.always(self.root, '')
928 928
929 929 if not force:
930 930 vdirs = []
931 931 match.dir = vdirs.append
932 932 match.bad = fail
933 933
934 934 wlock = self.wlock()
935 935 try:
936 936 wctx = self[None]
937 937 merge = len(wctx.parents()) > 1
938 938
939 939 if (not force and merge and match and
940 940 (match.files() or match.anypats())):
941 941 raise util.Abort(_('cannot partially commit a merge '
942 942 '(do not specify files or patterns)'))
943 943
944 944 changes = self.status(match=match, clean=force)
945 945 if force:
946 946 changes[0].extend(changes[6]) # mq may commit unchanged files
947 947
948 948 # check subrepos
949 949 subs = []
950 950 removedsubs = set()
951 951 for p in wctx.parents():
952 952 removedsubs.update(s for s in p.substate if match(s))
953 953 for s in wctx.substate:
954 954 removedsubs.discard(s)
955 955 if match(s) and wctx.sub(s).dirty():
956 956 subs.append(s)
957 957 if (subs or removedsubs):
958 958 if (not match('.hgsub') and
959 959 '.hgsub' in (wctx.modified() + wctx.added())):
960 960 raise util.Abort(_("can't commit subrepos without .hgsub"))
961 961 if '.hgsubstate' not in changes[0]:
962 962 changes[0].insert(0, '.hgsubstate')
963 963
964 964 if subs and not self.ui.configbool('ui', 'commitsubrepos', True):
965 965 changedsubs = [s for s in subs if wctx.sub(s).dirty(True)]
966 966 if changedsubs:
967 967 raise util.Abort(_("uncommitted changes in subrepo %s")
968 968 % changedsubs[0])
969 969
970 970 # make sure all explicit patterns are matched
971 971 if not force and match.files():
972 972 matched = set(changes[0] + changes[1] + changes[2])
973 973
974 974 for f in match.files():
975 975 if f == '.' or f in matched or f in wctx.substate:
976 976 continue
977 977 if f in changes[3]: # missing
978 978 fail(f, _('file not found!'))
979 979 if f in vdirs: # visited directory
980 980 d = f + '/'
981 981 for mf in matched:
982 982 if mf.startswith(d):
983 983 break
984 984 else:
985 985 fail(f, _("no match under directory!"))
986 986 elif f not in self.dirstate:
987 987 fail(f, _("file not tracked!"))
988 988
989 989 if (not force and not extra.get("close") and not merge
990 990 and not (changes[0] or changes[1] or changes[2])
991 991 and wctx.branch() == wctx.p1().branch()):
992 992 return None
993 993
994 994 ms = mergemod.mergestate(self)
995 995 for f in changes[0]:
996 996 if f in ms and ms[f] == 'u':
997 997 raise util.Abort(_("unresolved merge conflicts "
998 998 "(see hg help resolve)"))
999 999
1000 1000 cctx = context.workingctx(self, text, user, date, extra, changes)
1001 1001 if editor:
1002 1002 cctx._text = editor(self, cctx, subs)
1003 1003 edited = (text != cctx._text)
1004 1004
1005 1005 # commit subs
1006 1006 if subs or removedsubs:
1007 1007 state = wctx.substate.copy()
1008 1008 for s in sorted(subs):
1009 1009 sub = wctx.sub(s)
1010 1010 self.ui.status(_('committing subrepository %s\n') %
1011 1011 subrepo.subrelpath(sub))
1012 1012 sr = sub.commit(cctx._text, user, date)
1013 1013 state[s] = (state[s][0], sr)
1014 1014 subrepo.writestate(self, state)
1015 1015
1016 1016 # Save commit message in case this transaction gets rolled back
1017 1017 # (e.g. by a pretxncommit hook). Leave the content alone on
1018 1018 # the assumption that the user will use the same editor again.
1019 1019 msgfile = self.opener('last-message.txt', 'wb')
1020 1020 msgfile.write(cctx._text)
1021 1021 msgfile.close()
1022 1022
1023 1023 p1, p2 = self.dirstate.parents()
1024 1024 hookp1, hookp2 = hex(p1), (p2 != nullid and hex(p2) or '')
1025 1025 try:
1026 1026 self.hook("precommit", throw=True, parent1=hookp1, parent2=hookp2)
1027 1027 ret = self.commitctx(cctx, True)
1028 1028 except:
1029 1029 if edited:
1030 1030 msgfn = self.pathto(msgfile.name[len(self.root)+1:])
1031 1031 self.ui.write(
1032 1032 _('note: commit message saved in %s\n') % msgfn)
1033 1033 raise
1034 1034
1035 1035 # update bookmarks, dirstate and mergestate
1036 1036 bookmarks.update(self, p1, ret)
1037 1037 for f in changes[0] + changes[1]:
1038 1038 self.dirstate.normal(f)
1039 1039 for f in changes[2]:
1040 1040 self.dirstate.forget(f)
1041 1041 self.dirstate.setparents(ret)
1042 1042 ms.reset()
1043 1043 finally:
1044 1044 wlock.release()
1045 1045
1046 1046 self.hook("commit", node=hex(ret), parent1=hookp1, parent2=hookp2)
1047 1047 return ret
1048 1048
1049 1049 def commitctx(self, ctx, error=False):
1050 1050 """Add a new revision to current repository.
1051 1051 Revision information is passed via the context argument.
1052 1052 """
1053 1053
1054 1054 tr = lock = None
1055 1055 removed = list(ctx.removed())
1056 1056 p1, p2 = ctx.p1(), ctx.p2()
1057 1057 user = ctx.user()
1058 1058
1059 1059 lock = self.lock()
1060 1060 try:
1061 1061 tr = self.transaction("commit")
1062 1062 trp = weakref.proxy(tr)
1063 1063
1064 1064 if ctx.files():
1065 1065 m1 = p1.manifest().copy()
1066 1066 m2 = p2.manifest()
1067 1067
1068 1068 # check in files
1069 1069 new = {}
1070 1070 changed = []
1071 1071 linkrev = len(self)
1072 1072 for f in sorted(ctx.modified() + ctx.added()):
1073 1073 self.ui.note(f + "\n")
1074 1074 try:
1075 1075 fctx = ctx[f]
1076 1076 new[f] = self._filecommit(fctx, m1, m2, linkrev, trp,
1077 1077 changed)
1078 1078 m1.set(f, fctx.flags())
1079 1079 except OSError, inst:
1080 1080 self.ui.warn(_("trouble committing %s!\n") % f)
1081 1081 raise
1082 1082 except IOError, inst:
1083 1083 errcode = getattr(inst, 'errno', errno.ENOENT)
1084 1084 if error or errcode and errcode != errno.ENOENT:
1085 1085 self.ui.warn(_("trouble committing %s!\n") % f)
1086 1086 raise
1087 1087 else:
1088 1088 removed.append(f)
1089 1089
1090 1090 # update manifest
1091 1091 m1.update(new)
1092 1092 removed = [f for f in sorted(removed) if f in m1 or f in m2]
1093 1093 drop = [f for f in removed if f in m1]
1094 1094 for f in drop:
1095 1095 del m1[f]
1096 1096 mn = self.manifest.add(m1, trp, linkrev, p1.manifestnode(),
1097 1097 p2.manifestnode(), (new, drop))
1098 1098 files = changed + removed
1099 1099 else:
1100 1100 mn = p1.manifestnode()
1101 1101 files = []
1102 1102
1103 1103 # update changelog
1104 1104 self.changelog.delayupdate()
1105 1105 n = self.changelog.add(mn, files, ctx.description(),
1106 1106 trp, p1.node(), p2.node(),
1107 1107 user, ctx.date(), ctx.extra().copy())
1108 1108 p = lambda: self.changelog.writepending() and self.root or ""
1109 1109 xp1, xp2 = p1.hex(), p2 and p2.hex() or ''
1110 1110 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
1111 1111 parent2=xp2, pending=p)
1112 1112 self.changelog.finalize(trp)
1113 1113 tr.close()
1114 1114
1115 1115 if self._branchcache:
1116 1116 self.updatebranchcache()
1117 1117 return n
1118 1118 finally:
1119 1119 if tr:
1120 1120 tr.release()
1121 1121 lock.release()
1122 1122
1123 1123 def destroyed(self):
1124 1124 '''Inform the repository that nodes have been destroyed.
1125 1125 Intended for use by strip and rollback, so there's a common
1126 1126 place for anything that has to be done after destroying history.'''
1127 1127 # XXX it might be nice if we could take the list of destroyed
1128 1128 # nodes, but I don't see an easy way for rollback() to do that
1129 1129
1130 1130 # Ensure the persistent tag cache is updated. Doing it now
1131 1131 # means that the tag cache only has to worry about destroyed
1132 1132 # heads immediately after a strip/rollback. That in turn
1133 1133 # guarantees that "cachetip == currenttip" (comparing both rev
1134 1134 # and node) always means no nodes have been added or destroyed.
1135 1135
1136 1136 # XXX this is suboptimal when qrefresh'ing: we strip the current
1137 1137 # head, refresh the tag cache, then immediately add a new head.
1138 1138 # But I think doing it this way is necessary for the "instant
1139 1139 # tag cache retrieval" case to work.
1140 1140 self.invalidatecaches()
1141 1141
1142 1142 def walk(self, match, node=None):
1143 1143 '''
1144 1144 walk recursively through the directory tree or a given
1145 1145 changeset, finding all files matched by the match
1146 1146 function
1147 1147 '''
1148 1148 return self[node].walk(match)
1149 1149
1150 1150 def status(self, node1='.', node2=None, match=None,
1151 1151 ignored=False, clean=False, unknown=False,
1152 1152 listsubrepos=False):
1153 1153 """return status of files between two nodes or node and working directory
1154 1154
1155 1155 If node1 is None, use the first dirstate parent instead.
1156 1156 If node2 is None, compare node1 with working directory.
1157 1157 """
1158 1158
1159 1159 def mfmatches(ctx):
1160 1160 mf = ctx.manifest().copy()
1161 1161 for fn in mf.keys():
1162 1162 if not match(fn):
1163 1163 del mf[fn]
1164 1164 return mf
1165 1165
1166 1166 if isinstance(node1, context.changectx):
1167 1167 ctx1 = node1
1168 1168 else:
1169 1169 ctx1 = self[node1]
1170 1170 if isinstance(node2, context.changectx):
1171 1171 ctx2 = node2
1172 1172 else:
1173 1173 ctx2 = self[node2]
1174 1174
1175 1175 working = ctx2.rev() is None
1176 1176 parentworking = working and ctx1 == self['.']
1177 1177 match = match or matchmod.always(self.root, self.getcwd())
1178 1178 listignored, listclean, listunknown = ignored, clean, unknown
1179 1179
1180 1180 # load earliest manifest first for caching reasons
1181 1181 if not working and ctx2.rev() < ctx1.rev():
1182 1182 ctx2.manifest()
1183 1183
1184 1184 if not parentworking:
1185 1185 def bad(f, msg):
1186 1186 if f not in ctx1:
1187 1187 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1188 1188 match.bad = bad
1189 1189
1190 1190 if working: # we need to scan the working dir
1191 1191 subrepos = []
1192 1192 if '.hgsub' in self.dirstate:
1193 1193 subrepos = ctx1.substate.keys()
1194 1194 s = self.dirstate.status(match, subrepos, listignored,
1195 1195 listclean, listunknown)
1196 1196 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1197 1197
1198 1198 # check for any possibly clean files
1199 1199 if parentworking and cmp:
1200 1200 fixup = []
1201 1201 # do a full compare of any files that might have changed
1202 1202 for f in sorted(cmp):
1203 1203 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1204 1204 or ctx1[f].cmp(ctx2[f])):
1205 1205 modified.append(f)
1206 1206 else:
1207 1207 fixup.append(f)
1208 1208
1209 1209 # update dirstate for files that are actually clean
1210 1210 if fixup:
1211 1211 if listclean:
1212 1212 clean += fixup
1213 1213
1214 1214 try:
1215 1215 # updating the dirstate is optional
1216 1216 # so we don't wait on the lock
1217 1217 wlock = self.wlock(False)
1218 1218 try:
1219 1219 for f in fixup:
1220 1220 self.dirstate.normal(f)
1221 1221 finally:
1222 1222 wlock.release()
1223 1223 except error.LockError:
1224 1224 pass
1225 1225
1226 1226 if not parentworking:
1227 1227 mf1 = mfmatches(ctx1)
1228 1228 if working:
1229 1229 # we are comparing working dir against non-parent
1230 1230 # generate a pseudo-manifest for the working dir
1231 1231 mf2 = mfmatches(self['.'])
1232 1232 for f in cmp + modified + added:
1233 1233 mf2[f] = None
1234 1234 mf2.set(f, ctx2.flags(f))
1235 1235 for f in removed:
1236 1236 if f in mf2:
1237 1237 del mf2[f]
1238 1238 else:
1239 1239 # we are comparing two revisions
1240 1240 deleted, unknown, ignored = [], [], []
1241 1241 mf2 = mfmatches(ctx2)
1242 1242
1243 1243 modified, added, clean = [], [], []
1244 1244 for fn in mf2:
1245 1245 if fn in mf1:
1246 1246 if (fn not in deleted and
1247 1247 (mf1.flags(fn) != mf2.flags(fn) or
1248 1248 (mf1[fn] != mf2[fn] and
1249 1249 (mf2[fn] or ctx1[fn].cmp(ctx2[fn]))))):
1250 1250 modified.append(fn)
1251 1251 elif listclean:
1252 1252 clean.append(fn)
1253 1253 del mf1[fn]
1254 1254 elif fn not in deleted:
1255 1255 added.append(fn)
1256 1256 removed = mf1.keys()
1257 1257
1258 1258 r = modified, added, removed, deleted, unknown, ignored, clean
1259 1259
1260 1260 if listsubrepos:
1261 1261 for subpath, sub in subrepo.itersubrepos(ctx1, ctx2):
1262 1262 if working:
1263 1263 rev2 = None
1264 1264 else:
1265 1265 rev2 = ctx2.substate[subpath][1]
1266 1266 try:
1267 1267 submatch = matchmod.narrowmatcher(subpath, match)
1268 1268 s = sub.status(rev2, match=submatch, ignored=listignored,
1269 1269 clean=listclean, unknown=listunknown,
1270 1270 listsubrepos=True)
1271 1271 for rfiles, sfiles in zip(r, s):
1272 1272 rfiles.extend("%s/%s" % (subpath, f) for f in sfiles)
1273 1273 except error.LookupError:
1274 1274 self.ui.status(_("skipping missing subrepository: %s\n")
1275 1275 % subpath)
1276 1276
1277 1277 for l in r:
1278 1278 l.sort()
1279 1279 return r
1280 1280
1281 1281 def heads(self, start=None):
1282 1282 heads = self.changelog.heads(start)
1283 1283 # sort the output in rev descending order
1284 1284 return sorted(heads, key=self.changelog.rev, reverse=True)
1285 1285
1286 1286 def branchheads(self, branch=None, start=None, closed=False):
1287 1287 '''return a (possibly filtered) list of heads for the given branch
1288 1288
1289 1289 Heads are returned in topological order, from newest to oldest.
1290 1290 If branch is None, use the dirstate branch.
1291 1291 If start is not None, return only heads reachable from start.
1292 1292 If closed is True, return heads that are marked as closed as well.
1293 1293 '''
1294 1294 if branch is None:
1295 1295 branch = self[None].branch()
1296 1296 branches = self.branchmap()
1297 1297 if branch not in branches:
1298 1298 return []
1299 1299 # the cache returns heads ordered lowest to highest
1300 1300 bheads = list(reversed(branches[branch]))
1301 1301 if start is not None:
1302 1302 # filter out the heads that cannot be reached from startrev
1303 1303 fbheads = set(self.changelog.nodesbetween([start], bheads)[2])
1304 1304 bheads = [h for h in bheads if h in fbheads]
1305 1305 if not closed:
1306 1306 bheads = [h for h in bheads if
1307 1307 ('close' not in self.changelog.read(h)[5])]
1308 1308 return bheads
1309 1309
1310 1310 def branches(self, nodes):
1311 1311 if not nodes:
1312 1312 nodes = [self.changelog.tip()]
1313 1313 b = []
1314 1314 for n in nodes:
1315 1315 t = n
1316 1316 while 1:
1317 1317 p = self.changelog.parents(n)
1318 1318 if p[1] != nullid or p[0] == nullid:
1319 1319 b.append((t, n, p[0], p[1]))
1320 1320 break
1321 1321 n = p[0]
1322 1322 return b
1323 1323
1324 1324 def between(self, pairs):
1325 1325 r = []
1326 1326
1327 1327 for top, bottom in pairs:
1328 1328 n, l, i = top, [], 0
1329 1329 f = 1
1330 1330
1331 1331 while n != bottom and n != nullid:
1332 1332 p = self.changelog.parents(n)[0]
1333 1333 if i == f:
1334 1334 l.append(n)
1335 1335 f = f * 2
1336 1336 n = p
1337 1337 i += 1
1338 1338
1339 1339 r.append(l)
1340 1340
1341 1341 return r
1342 1342
1343 1343 def pull(self, remote, heads=None, force=False):
1344 1344 lock = self.lock()
1345 1345 try:
1346 1346 tmp = discovery.findcommonincoming(self, remote, heads=heads,
1347 1347 force=force)
1348 1348 common, fetch, rheads = tmp
1349 1349 if not fetch:
1350 1350 self.ui.status(_("no changes found\n"))
1351 1351 result = 0
1352 1352 else:
1353 1353 if heads is None and list(common) == [nullid]:
1354 1354 self.ui.status(_("requesting all changes\n"))
1355 1355 elif heads is None and remote.capable('changegroupsubset'):
1356 1356 # issue1320, avoid a race if remote changed after discovery
1357 1357 heads = rheads
1358 1358
1359 1359 if remote.capable('getbundle'):
1360 1360 cg = remote.getbundle('pull', common=common,
1361 1361 heads=heads or rheads)
1362 1362 elif heads is None:
1363 1363 cg = remote.changegroup(fetch, 'pull')
1364 1364 elif not remote.capable('changegroupsubset'):
1365 1365 raise util.Abort(_("partial pull cannot be done because "
1366 1366 "other repository doesn't support "
1367 1367 "changegroupsubset."))
1368 1368 else:
1369 1369 cg = remote.changegroupsubset(fetch, heads, 'pull')
1370 1370 result = self.addchangegroup(cg, 'pull', remote.url(),
1371 1371 lock=lock)
1372 1372 finally:
1373 1373 lock.release()
1374 1374
1375 1375 return result
1376 1376
1377 1377 def checkpush(self, force, revs):
1378 1378 """Extensions can override this function if additional checks have
1379 1379 to be performed before pushing, or call it if they override push
1380 1380 command.
1381 1381 """
1382 1382 pass
1383 1383
1384 1384 def push(self, remote, force=False, revs=None, newbranch=False):
1385 1385 '''Push outgoing changesets (limited by revs) from the current
1386 1386 repository to remote. Return an integer:
1387 1387 - 0 means HTTP error *or* nothing to push
1388 1388 - 1 means we pushed and remote head count is unchanged *or*
1389 1389 we have outgoing changesets but refused to push
1390 1390 - other values as described by addchangegroup()
1391 1391 '''
1392 1392 # there are two ways to push to remote repo:
1393 1393 #
1394 1394 # addchangegroup assumes local user can lock remote
1395 1395 # repo (local filesystem, old ssh servers).
1396 1396 #
1397 1397 # unbundle assumes local user cannot lock remote repo (new ssh
1398 1398 # servers, http servers).
1399 1399
1400 1400 self.checkpush(force, revs)
1401 1401 lock = None
1402 1402 unbundle = remote.capable('unbundle')
1403 1403 if not unbundle:
1404 1404 lock = remote.lock()
1405 1405 try:
1406 1406 cg, remote_heads = discovery.prepush(self, remote, force, revs,
1407 1407 newbranch)
1408 1408 ret = remote_heads
1409 1409 if cg is not None:
1410 1410 if unbundle:
1411 1411 # local repo finds heads on server, finds out what
1412 1412 # revs it must push. once revs transferred, if server
1413 1413 # finds it has different heads (someone else won
1414 1414 # commit/push race), server aborts.
1415 1415 if force:
1416 1416 remote_heads = ['force']
1417 1417 # ssh: return remote's addchangegroup()
1418 1418 # http: return remote's addchangegroup() or 0 for error
1419 1419 ret = remote.unbundle(cg, remote_heads, 'push')
1420 1420 else:
1421 1421 # we return an integer indicating remote head count change
1422 1422 ret = remote.addchangegroup(cg, 'push', self.url(),
1423 1423 lock=lock)
1424 1424 finally:
1425 1425 if lock is not None:
1426 1426 lock.release()
1427 1427
1428 1428 self.ui.debug("checking for updated bookmarks\n")
1429 1429 rb = remote.listkeys('bookmarks')
1430 1430 for k in rb.keys():
1431 1431 if k in self._bookmarks:
1432 1432 nr, nl = rb[k], hex(self._bookmarks[k])
1433 1433 if nr in self:
1434 1434 cr = self[nr]
1435 1435 cl = self[nl]
1436 1436 if cl in cr.descendants():
1437 1437 r = remote.pushkey('bookmarks', k, nr, nl)
1438 1438 if r:
1439 1439 self.ui.status(_("updating bookmark %s\n") % k)
1440 1440 else:
1441 1441 self.ui.warn(_('updating bookmark %s'
1442 1442 ' failed!\n') % k)
1443 1443
1444 1444 return ret
1445 1445
1446 1446 def changegroupinfo(self, nodes, source):
1447 1447 if self.ui.verbose or source == 'bundle':
1448 1448 self.ui.status(_("%d changesets found\n") % len(nodes))
1449 1449 if self.ui.debugflag:
1450 1450 self.ui.debug("list of changesets:\n")
1451 1451 for node in nodes:
1452 1452 self.ui.debug("%s\n" % hex(node))
1453 1453
1454 1454 def changegroupsubset(self, bases, heads, source):
1455 1455 """Compute a changegroup consisting of all the nodes that are
1456 1456 descendents of any of the bases and ancestors of any of the heads.
1457 1457 Return a chunkbuffer object whose read() method will return
1458 1458 successive changegroup chunks.
1459 1459
1460 1460 It is fairly complex as determining which filenodes and which
1461 1461 manifest nodes need to be included for the changeset to be complete
1462 1462 is non-trivial.
1463 1463
1464 1464 Another wrinkle is doing the reverse, figuring out which changeset in
1465 1465 the changegroup a particular filenode or manifestnode belongs to.
1466 1466 """
1467 1467 cl = self.changelog
1468 1468 if not bases:
1469 1469 bases = [nullid]
1470 1470 csets, bases, heads = cl.nodesbetween(bases, heads)
1471 1471 # We assume that all ancestors of bases are known
1472 1472 common = set(cl.ancestors(*[cl.rev(n) for n in bases]))
1473 1473 return self._changegroupsubset(common, csets, heads, source)
1474 1474
1475 1475 def getbundle(self, source, heads=None, common=None):
1476 1476 """Like changegroupsubset, but returns the set difference between the
1477 1477 ancestors of heads and the ancestors common.
1478 1478
1479 1479 If heads is None, use the local heads. If common is None, use [nullid].
1480 1480
1481 1481 The nodes in common might not all be known locally due to the way the
1482 1482 current discovery protocol works.
1483 1483 """
1484 1484 cl = self.changelog
1485 1485 if common:
1486 1486 nm = cl.nodemap
1487 1487 common = [n for n in common if n in nm]
1488 1488 else:
1489 1489 common = [nullid]
1490 1490 if not heads:
1491 1491 heads = cl.heads()
1492 1492 common, missing = cl.findcommonmissing(common, heads)
1493 1493 if not missing:
1494 1494 return None
1495 1495 return self._changegroupsubset(common, missing, heads, source)
1496 1496
1497 1497 def _changegroupsubset(self, commonrevs, csets, heads, source):
1498 1498
1499 1499 cl = self.changelog
1500 1500 mf = self.manifest
1501 1501 mfs = {} # needed manifests
1502 1502 fnodes = {} # needed file nodes
1503 1503 changedfiles = set()
1504 1504 fstate = ['', {}]
1505 1505 count = [0]
1506 1506
1507 1507 # can we go through the fast path ?
1508 1508 heads.sort()
1509 1509 if heads == sorted(self.heads()):
1510 1510 return self._changegroup(csets, source)
1511 1511
1512 1512 # slow path
1513 1513 self.hook('preoutgoing', throw=True, source=source)
1514 1514 self.changegroupinfo(csets, source)
1515 1515
1516 1516 # filter any nodes that claim to be part of the known set
1517 1517 def prune(revlog, missing):
1518 1518 for n in missing:
1519 1519 if revlog.linkrev(revlog.rev(n)) not in commonrevs:
1520 1520 yield n
1521 1521
1522 1522 def lookup(revlog, x):
1523 1523 if revlog == cl:
1524 1524 c = cl.read(x)
1525 1525 changedfiles.update(c[3])
1526 1526 mfs.setdefault(c[0], x)
1527 1527 count[0] += 1
1528 1528 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1529 1529 return x
1530 1530 elif revlog == mf:
1531 1531 clnode = mfs[x]
1532 1532 mdata = mf.readfast(x)
1533 1533 for f in changedfiles:
1534 1534 if f in mdata:
1535 1535 fnodes.setdefault(f, {}).setdefault(mdata[f], clnode)
1536 1536 count[0] += 1
1537 1537 self.ui.progress(_('bundling'), count[0],
1538 1538 unit=_('manifests'), total=len(mfs))
1539 1539 return mfs[x]
1540 1540 else:
1541 1541 self.ui.progress(
1542 1542 _('bundling'), count[0], item=fstate[0],
1543 1543 unit=_('files'), total=len(changedfiles))
1544 1544 return fstate[1][x]
1545 1545
1546 1546 bundler = changegroup.bundle10(lookup)
1547 reorder = self.ui.config('bundle', 'reorder', 'auto')
1548 if reorder == 'auto':
1549 reorder = None
1550 else:
1551 reorder = util.parsebool(reorder)
1547 1552
1548 1553 def gengroup():
1549 1554 # Create a changenode group generator that will call our functions
1550 1555 # back to lookup the owning changenode and collect information.
1551 for chunk in cl.group(csets, bundler):
1556 for chunk in cl.group(csets, bundler, reorder=reorder):
1552 1557 yield chunk
1553 1558 self.ui.progress(_('bundling'), None)
1554 1559
1555 1560 # Create a generator for the manifestnodes that calls our lookup
1556 1561 # and data collection functions back.
1557 1562 count[0] = 0
1558 for chunk in mf.group(prune(mf, mfs), bundler):
1563 for chunk in mf.group(prune(mf, mfs), bundler, reorder=reorder):
1559 1564 yield chunk
1560 1565 self.ui.progress(_('bundling'), None)
1561 1566
1562 1567 mfs.clear()
1563 1568
1564 1569 # Go through all our files in order sorted by name.
1565 1570 count[0] = 0
1566 1571 for fname in sorted(changedfiles):
1567 1572 filerevlog = self.file(fname)
1568 1573 if not len(filerevlog):
1569 1574 raise util.Abort(_("empty or missing revlog for %s") % fname)
1570 1575 fstate[0] = fname
1571 1576 fstate[1] = fnodes.pop(fname, {})
1572 1577 first = True
1573 1578
1574 1579 for chunk in filerevlog.group(prune(filerevlog, fstate[1]),
1575 bundler):
1580 bundler, reorder=reorder):
1576 1581 if first:
1577 1582 if chunk == bundler.close():
1578 1583 break
1579 1584 count[0] += 1
1580 1585 yield bundler.fileheader(fname)
1581 1586 first = False
1582 1587 yield chunk
1583 1588 # Signal that no more groups are left.
1584 1589 yield bundler.close()
1585 1590 self.ui.progress(_('bundling'), None)
1586 1591
1587 1592 if csets:
1588 1593 self.hook('outgoing', node=hex(csets[0]), source=source)
1589 1594
1590 1595 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1591 1596
1592 1597 def changegroup(self, basenodes, source):
1593 1598 # to avoid a race we use changegroupsubset() (issue1320)
1594 1599 return self.changegroupsubset(basenodes, self.heads(), source)
1595 1600
1596 1601 def _changegroup(self, nodes, source):
1597 1602 """Compute the changegroup of all nodes that we have that a recipient
1598 1603 doesn't. Return a chunkbuffer object whose read() method will return
1599 1604 successive changegroup chunks.
1600 1605
1601 1606 This is much easier than the previous function as we can assume that
1602 1607 the recipient has any changenode we aren't sending them.
1603 1608
1604 1609 nodes is the set of nodes to send"""
1605 1610
1606 1611 cl = self.changelog
1607 1612 mf = self.manifest
1608 1613 mfs = {}
1609 1614 changedfiles = set()
1610 1615 fstate = ['']
1611 1616 count = [0]
1612 1617
1613 1618 self.hook('preoutgoing', throw=True, source=source)
1614 1619 self.changegroupinfo(nodes, source)
1615 1620
1616 1621 revset = set([cl.rev(n) for n in nodes])
1617 1622
1618 1623 def gennodelst(log):
1619 1624 for r in log:
1620 1625 if log.linkrev(r) in revset:
1621 1626 yield log.node(r)
1622 1627
1623 1628 def lookup(revlog, x):
1624 1629 if revlog == cl:
1625 1630 c = cl.read(x)
1626 1631 changedfiles.update(c[3])
1627 1632 mfs.setdefault(c[0], x)
1628 1633 count[0] += 1
1629 1634 self.ui.progress(_('bundling'), count[0], unit=_('changesets'))
1630 1635 return x
1631 1636 elif revlog == mf:
1632 1637 count[0] += 1
1633 1638 self.ui.progress(_('bundling'), count[0],
1634 1639 unit=_('manifests'), total=len(mfs))
1635 1640 return cl.node(revlog.linkrev(revlog.rev(x)))
1636 1641 else:
1637 1642 self.ui.progress(
1638 1643 _('bundling'), count[0], item=fstate[0],
1639 1644 total=len(changedfiles), unit=_('files'))
1640 1645 return cl.node(revlog.linkrev(revlog.rev(x)))
1641 1646
1642 1647 bundler = changegroup.bundle10(lookup)
1648 reorder = self.ui.config('bundle', 'reorder', 'auto')
1649 if reorder == 'auto':
1650 reorder = None
1651 else:
1652 reorder = util.parsebool(reorder)
1643 1653
1644 1654 def gengroup():
1645 1655 '''yield a sequence of changegroup chunks (strings)'''
1646 1656 # construct a list of all changed files
1647 1657
1648 for chunk in cl.group(nodes, bundler):
1658 for chunk in cl.group(nodes, bundler, reorder=reorder):
1649 1659 yield chunk
1650 1660 self.ui.progress(_('bundling'), None)
1651 1661
1652 1662 count[0] = 0
1653 for chunk in mf.group(gennodelst(mf), bundler):
1663 for chunk in mf.group(gennodelst(mf), bundler, reorder=reorder):
1654 1664 yield chunk
1655 1665 self.ui.progress(_('bundling'), None)
1656 1666
1657 1667 count[0] = 0
1658 1668 for fname in sorted(changedfiles):
1659 1669 filerevlog = self.file(fname)
1660 1670 if not len(filerevlog):
1661 1671 raise util.Abort(_("empty or missing revlog for %s") % fname)
1662 1672 fstate[0] = fname
1663 1673 first = True
1664 for chunk in filerevlog.group(gennodelst(filerevlog), bundler):
1674 for chunk in filerevlog.group(gennodelst(filerevlog), bundler,
1675 reorder=reorder):
1665 1676 if first:
1666 1677 if chunk == bundler.close():
1667 1678 break
1668 1679 count[0] += 1
1669 1680 yield bundler.fileheader(fname)
1670 1681 first = False
1671 1682 yield chunk
1672 1683 yield bundler.close()
1673 1684 self.ui.progress(_('bundling'), None)
1674 1685
1675 1686 if nodes:
1676 1687 self.hook('outgoing', node=hex(nodes[0]), source=source)
1677 1688
1678 1689 return changegroup.unbundle10(util.chunkbuffer(gengroup()), 'UN')
1679 1690
1680 1691 def addchangegroup(self, source, srctype, url, emptyok=False, lock=None):
1681 1692 """Add the changegroup returned by source.read() to this repo.
1682 1693 srctype is a string like 'push', 'pull', or 'unbundle'. url is
1683 1694 the URL of the repo where this changegroup is coming from.
1684 1695 If lock is not None, the function takes ownership of the lock
1685 1696 and releases it after the changegroup is added.
1686 1697
1687 1698 Return an integer summarizing the change to this repo:
1688 1699 - nothing changed or no source: 0
1689 1700 - more heads than before: 1+added heads (2..n)
1690 1701 - fewer heads than before: -1-removed heads (-2..-n)
1691 1702 - number of heads stays the same: 1
1692 1703 """
1693 1704 def csmap(x):
1694 1705 self.ui.debug("add changeset %s\n" % short(x))
1695 1706 return len(cl)
1696 1707
1697 1708 def revmap(x):
1698 1709 return cl.rev(x)
1699 1710
1700 1711 if not source:
1701 1712 return 0
1702 1713
1703 1714 self.hook('prechangegroup', throw=True, source=srctype, url=url)
1704 1715
1705 1716 changesets = files = revisions = 0
1706 1717 efiles = set()
1707 1718
1708 1719 # write changelog data to temp files so concurrent readers will not see
1709 1720 # inconsistent view
1710 1721 cl = self.changelog
1711 1722 cl.delayupdate()
1712 1723 oldheads = cl.heads()
1713 1724
1714 1725 tr = self.transaction("\n".join([srctype, util.hidepassword(url)]))
1715 1726 try:
1716 1727 trp = weakref.proxy(tr)
1717 1728 # pull off the changeset group
1718 1729 self.ui.status(_("adding changesets\n"))
1719 1730 clstart = len(cl)
1720 1731 class prog(object):
1721 1732 step = _('changesets')
1722 1733 count = 1
1723 1734 ui = self.ui
1724 1735 total = None
1725 1736 def __call__(self):
1726 1737 self.ui.progress(self.step, self.count, unit=_('chunks'),
1727 1738 total=self.total)
1728 1739 self.count += 1
1729 1740 pr = prog()
1730 1741 source.callback = pr
1731 1742
1732 1743 source.changelogheader()
1733 1744 if (cl.addgroup(source, csmap, trp) is None
1734 1745 and not emptyok):
1735 1746 raise util.Abort(_("received changelog group is empty"))
1736 1747 clend = len(cl)
1737 1748 changesets = clend - clstart
1738 1749 for c in xrange(clstart, clend):
1739 1750 efiles.update(self[c].files())
1740 1751 efiles = len(efiles)
1741 1752 self.ui.progress(_('changesets'), None)
1742 1753
1743 1754 # pull off the manifest group
1744 1755 self.ui.status(_("adding manifests\n"))
1745 1756 pr.step = _('manifests')
1746 1757 pr.count = 1
1747 1758 pr.total = changesets # manifests <= changesets
1748 1759 # no need to check for empty manifest group here:
1749 1760 # if the result of the merge of 1 and 2 is the same in 3 and 4,
1750 1761 # no new manifest will be created and the manifest group will
1751 1762 # be empty during the pull
1752 1763 source.manifestheader()
1753 1764 self.manifest.addgroup(source, revmap, trp)
1754 1765 self.ui.progress(_('manifests'), None)
1755 1766
1756 1767 needfiles = {}
1757 1768 if self.ui.configbool('server', 'validate', default=False):
1758 1769 # validate incoming csets have their manifests
1759 1770 for cset in xrange(clstart, clend):
1760 1771 mfest = self.changelog.read(self.changelog.node(cset))[0]
1761 1772 mfest = self.manifest.readdelta(mfest)
1762 1773 # store file nodes we must see
1763 1774 for f, n in mfest.iteritems():
1764 1775 needfiles.setdefault(f, set()).add(n)
1765 1776
1766 1777 # process the files
1767 1778 self.ui.status(_("adding file changes\n"))
1768 1779 pr.step = 'files'
1769 1780 pr.count = 1
1770 1781 pr.total = efiles
1771 1782 source.callback = None
1772 1783
1773 1784 while 1:
1774 1785 chunkdata = source.filelogheader()
1775 1786 if not chunkdata:
1776 1787 break
1777 1788 f = chunkdata["filename"]
1778 1789 self.ui.debug("adding %s revisions\n" % f)
1779 1790 pr()
1780 1791 fl = self.file(f)
1781 1792 o = len(fl)
1782 1793 if fl.addgroup(source, revmap, trp) is None:
1783 1794 raise util.Abort(_("received file revlog group is empty"))
1784 1795 revisions += len(fl) - o
1785 1796 files += 1
1786 1797 if f in needfiles:
1787 1798 needs = needfiles[f]
1788 1799 for new in xrange(o, len(fl)):
1789 1800 n = fl.node(new)
1790 1801 if n in needs:
1791 1802 needs.remove(n)
1792 1803 if not needs:
1793 1804 del needfiles[f]
1794 1805 self.ui.progress(_('files'), None)
1795 1806
1796 1807 for f, needs in needfiles.iteritems():
1797 1808 fl = self.file(f)
1798 1809 for n in needs:
1799 1810 try:
1800 1811 fl.rev(n)
1801 1812 except error.LookupError:
1802 1813 raise util.Abort(
1803 1814 _('missing file data for %s:%s - run hg verify') %
1804 1815 (f, hex(n)))
1805 1816
1806 1817 dh = 0
1807 1818 if oldheads:
1808 1819 heads = cl.heads()
1809 1820 dh = len(heads) - len(oldheads)
1810 1821 for h in heads:
1811 1822 if h not in oldheads and 'close' in self[h].extra():
1812 1823 dh -= 1
1813 1824 htext = ""
1814 1825 if dh:
1815 1826 htext = _(" (%+d heads)") % dh
1816 1827
1817 1828 self.ui.status(_("added %d changesets"
1818 1829 " with %d changes to %d files%s\n")
1819 1830 % (changesets, revisions, files, htext))
1820 1831
1821 1832 if changesets > 0:
1822 1833 p = lambda: cl.writepending() and self.root or ""
1823 1834 self.hook('pretxnchangegroup', throw=True,
1824 1835 node=hex(cl.node(clstart)), source=srctype,
1825 1836 url=url, pending=p)
1826 1837
1827 1838 # make changelog see real files again
1828 1839 cl.finalize(trp)
1829 1840
1830 1841 tr.close()
1831 1842 finally:
1832 1843 tr.release()
1833 1844 if lock:
1834 1845 lock.release()
1835 1846
1836 1847 if changesets > 0:
1837 1848 # forcefully update the on-disk branch cache
1838 1849 self.ui.debug("updating the branch cache\n")
1839 1850 self.updatebranchcache()
1840 1851 self.hook("changegroup", node=hex(cl.node(clstart)),
1841 1852 source=srctype, url=url)
1842 1853
1843 1854 for i in xrange(clstart, clend):
1844 1855 self.hook("incoming", node=hex(cl.node(i)),
1845 1856 source=srctype, url=url)
1846 1857
1847 1858 # never return 0 here:
1848 1859 if dh < 0:
1849 1860 return dh - 1
1850 1861 else:
1851 1862 return dh + 1
1852 1863
1853 1864 def stream_in(self, remote, requirements):
1854 1865 lock = self.lock()
1855 1866 try:
1856 1867 fp = remote.stream_out()
1857 1868 l = fp.readline()
1858 1869 try:
1859 1870 resp = int(l)
1860 1871 except ValueError:
1861 1872 raise error.ResponseError(
1862 1873 _('Unexpected response from remote server:'), l)
1863 1874 if resp == 1:
1864 1875 raise util.Abort(_('operation forbidden by server'))
1865 1876 elif resp == 2:
1866 1877 raise util.Abort(_('locking the remote repository failed'))
1867 1878 elif resp != 0:
1868 1879 raise util.Abort(_('the server sent an unknown error code'))
1869 1880 self.ui.status(_('streaming all changes\n'))
1870 1881 l = fp.readline()
1871 1882 try:
1872 1883 total_files, total_bytes = map(int, l.split(' ', 1))
1873 1884 except (ValueError, TypeError):
1874 1885 raise error.ResponseError(
1875 1886 _('Unexpected response from remote server:'), l)
1876 1887 self.ui.status(_('%d files to transfer, %s of data\n') %
1877 1888 (total_files, util.bytecount(total_bytes)))
1878 1889 start = time.time()
1879 1890 for i in xrange(total_files):
1880 1891 # XXX doesn't support '\n' or '\r' in filenames
1881 1892 l = fp.readline()
1882 1893 try:
1883 1894 name, size = l.split('\0', 1)
1884 1895 size = int(size)
1885 1896 except (ValueError, TypeError):
1886 1897 raise error.ResponseError(
1887 1898 _('Unexpected response from remote server:'), l)
1888 1899 self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
1889 1900 # for backwards compat, name was partially encoded
1890 1901 ofp = self.sopener(store.decodedir(name), 'w')
1891 1902 for chunk in util.filechunkiter(fp, limit=size):
1892 1903 ofp.write(chunk)
1893 1904 ofp.close()
1894 1905 elapsed = time.time() - start
1895 1906 if elapsed <= 0:
1896 1907 elapsed = 0.001
1897 1908 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
1898 1909 (util.bytecount(total_bytes), elapsed,
1899 1910 util.bytecount(total_bytes / elapsed)))
1900 1911
1901 1912 # new requirements = old non-format requirements + new format-related
1902 1913 # requirements from the streamed-in repository
1903 1914 requirements.update(set(self.requirements) - self.supportedformats)
1904 1915 self._applyrequirements(requirements)
1905 1916 self._writerequirements()
1906 1917
1907 1918 self.invalidate()
1908 1919 return len(self.heads()) + 1
1909 1920 finally:
1910 1921 lock.release()
1911 1922
1912 1923 def clone(self, remote, heads=[], stream=False):
1913 1924 '''clone remote repository.
1914 1925
1915 1926 keyword arguments:
1916 1927 heads: list of revs to clone (forces use of pull)
1917 1928 stream: use streaming clone if possible'''
1918 1929
1919 1930 # now, all clients that can request uncompressed clones can
1920 1931 # read repo formats supported by all servers that can serve
1921 1932 # them.
1922 1933
1923 1934 # if revlog format changes, client will have to check version
1924 1935 # and format flags on "stream" capability, and use
1925 1936 # uncompressed only if compatible.
1926 1937
1927 1938 if stream and not heads:
1928 1939 # 'stream' means remote revlog format is revlogv1 only
1929 1940 if remote.capable('stream'):
1930 1941 return self.stream_in(remote, set(('revlogv1',)))
1931 1942 # otherwise, 'streamreqs' contains the remote revlog format
1932 1943 streamreqs = remote.capable('streamreqs')
1933 1944 if streamreqs:
1934 1945 streamreqs = set(streamreqs.split(','))
1935 1946 # if we support it, stream in and adjust our requirements
1936 1947 if not streamreqs - self.supportedformats:
1937 1948 return self.stream_in(remote, streamreqs)
1938 1949 return self.pull(remote, heads)
1939 1950
1940 1951 def pushkey(self, namespace, key, old, new):
1941 1952 self.hook('prepushkey', throw=True, namespace=namespace, key=key,
1942 1953 old=old, new=new)
1943 1954 ret = pushkey.push(self, namespace, key, old, new)
1944 1955 self.hook('pushkey', namespace=namespace, key=key, old=old, new=new,
1945 1956 ret=ret)
1946 1957 return ret
1947 1958
1948 1959 def listkeys(self, namespace):
1949 1960 self.hook('prelistkeys', throw=True, namespace=namespace)
1950 1961 values = pushkey.list(self, namespace)
1951 1962 self.hook('listkeys', namespace=namespace, values=values)
1952 1963 return values
1953 1964
1954 1965 def debugwireargs(self, one, two, three=None, four=None, five=None):
1955 1966 '''used to test argument passing over the wire'''
1956 1967 return "%s %s %s %s %s" % (one, two, three, four, five)
1957 1968
1958 1969 # used to avoid circular references so destructors work
1959 1970 def aftertrans(files):
1960 1971 renamefiles = [tuple(t) for t in files]
1961 1972 def a():
1962 1973 for src, dest in renamefiles:
1963 1974 util.rename(src, dest)
1964 1975 return a
1965 1976
1966 1977 def undoname(fn):
1967 1978 base, name = os.path.split(fn)
1968 1979 assert name.startswith('journal')
1969 1980 return os.path.join(base, name.replace('journal', 'undo', 1))
1970 1981
1971 1982 def instance(ui, path, create):
1972 1983 return localrepository(ui, util.localpath(path), create)
1973 1984
1974 1985 def islocal(path):
1975 1986 return True
@@ -1,1273 +1,1280 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 """Storage back-end for Mercurial.
9 9
10 10 This provides efficient delta storage with O(1) retrieve and append
11 11 and O(changes) merge between branches.
12 12 """
13 13
14 14 # import stuff from node for others to import from revlog
15 15 from node import bin, hex, nullid, nullrev, short #@UnusedImport
16 16 from i18n import _
17 import ancestor, mdiff, parsers, error, util
17 import ancestor, mdiff, parsers, error, util, dagutil
18 18 import struct, zlib, errno
19 19
20 20 _pack = struct.pack
21 21 _unpack = struct.unpack
22 22 _compress = zlib.compress
23 23 _decompress = zlib.decompress
24 24 _sha = util.sha1
25 25
26 26 # revlog header flags
27 27 REVLOGV0 = 0
28 28 REVLOGNG = 1
29 29 REVLOGNGINLINEDATA = (1 << 16)
30 30 REVLOGGENERALDELTA = (1 << 17)
31 31 REVLOG_DEFAULT_FLAGS = REVLOGNGINLINEDATA
32 32 REVLOG_DEFAULT_FORMAT = REVLOGNG
33 33 REVLOG_DEFAULT_VERSION = REVLOG_DEFAULT_FORMAT | REVLOG_DEFAULT_FLAGS
34 34 REVLOGNG_FLAGS = REVLOGNGINLINEDATA | REVLOGGENERALDELTA
35 35
36 36 # revlog index flags
37 37 REVIDX_KNOWN_FLAGS = 0
38 38
39 39 # max size of revlog with inline data
40 40 _maxinline = 131072
41 41 _chunksize = 1048576
42 42
43 43 RevlogError = error.RevlogError
44 44 LookupError = error.LookupError
45 45
46 46 def getoffset(q):
47 47 return int(q >> 16)
48 48
49 49 def gettype(q):
50 50 return int(q & 0xFFFF)
51 51
52 52 def offset_type(offset, type):
53 53 return long(long(offset) << 16 | type)
54 54
55 55 nullhash = _sha(nullid)
56 56
57 57 def hash(text, p1, p2):
58 58 """generate a hash from the given text and its parent hashes
59 59
60 60 This hash combines both the current file contents and its history
61 61 in a manner that makes it easy to distinguish nodes with the same
62 62 content in the revision graph.
63 63 """
64 64 # As of now, if one of the parent node is null, p2 is null
65 65 if p2 == nullid:
66 66 # deep copy of a hash is faster than creating one
67 67 s = nullhash.copy()
68 68 s.update(p1)
69 69 else:
70 70 # none of the parent nodes are nullid
71 71 l = [p1, p2]
72 72 l.sort()
73 73 s = _sha(l[0])
74 74 s.update(l[1])
75 75 s.update(text)
76 76 return s.digest()
77 77
78 78 def compress(text):
79 79 """ generate a possibly-compressed representation of text """
80 80 if not text:
81 81 return ("", text)
82 82 l = len(text)
83 83 bin = None
84 84 if l < 44:
85 85 pass
86 86 elif l > 1000000:
87 87 # zlib makes an internal copy, thus doubling memory usage for
88 88 # large files, so lets do this in pieces
89 89 z = zlib.compressobj()
90 90 p = []
91 91 pos = 0
92 92 while pos < l:
93 93 pos2 = pos + 2**20
94 94 p.append(z.compress(text[pos:pos2]))
95 95 pos = pos2
96 96 p.append(z.flush())
97 97 if sum(map(len, p)) < l:
98 98 bin = "".join(p)
99 99 else:
100 100 bin = _compress(text)
101 101 if bin is None or len(bin) > l:
102 102 if text[0] == '\0':
103 103 return ("", text)
104 104 return ('u', text)
105 105 return ("", bin)
106 106
107 107 def decompress(bin):
108 108 """ decompress the given input """
109 109 if not bin:
110 110 return bin
111 111 t = bin[0]
112 112 if t == '\0':
113 113 return bin
114 114 if t == 'x':
115 115 return _decompress(bin)
116 116 if t == 'u':
117 117 return bin[1:]
118 118 raise RevlogError(_("unknown compression type %r") % t)
119 119
120 120 indexformatv0 = ">4l20s20s20s"
121 121 v0shaoffset = 56
122 122
123 123 class revlogoldio(object):
124 124 def __init__(self):
125 125 self.size = struct.calcsize(indexformatv0)
126 126
127 127 def parseindex(self, data, inline):
128 128 s = self.size
129 129 index = []
130 130 nodemap = {nullid: nullrev}
131 131 n = off = 0
132 132 l = len(data)
133 133 while off + s <= l:
134 134 cur = data[off:off + s]
135 135 off += s
136 136 e = _unpack(indexformatv0, cur)
137 137 # transform to revlogv1 format
138 138 e2 = (offset_type(e[0], 0), e[1], -1, e[2], e[3],
139 139 nodemap.get(e[4], nullrev), nodemap.get(e[5], nullrev), e[6])
140 140 index.append(e2)
141 141 nodemap[e[6]] = n
142 142 n += 1
143 143
144 144 # add the magic null revision at -1
145 145 index.append((0, 0, 0, -1, -1, -1, -1, nullid))
146 146
147 147 return index, nodemap, None
148 148
149 149 def packentry(self, entry, node, version, rev):
150 150 if gettype(entry[0]):
151 151 raise RevlogError(_("index entry flags need RevlogNG"))
152 152 e2 = (getoffset(entry[0]), entry[1], entry[3], entry[4],
153 153 node(entry[5]), node(entry[6]), entry[7])
154 154 return _pack(indexformatv0, *e2)
155 155
156 156 # index ng:
157 157 # 6 bytes: offset
158 158 # 2 bytes: flags
159 159 # 4 bytes: compressed length
160 160 # 4 bytes: uncompressed length
161 161 # 4 bytes: base rev
162 162 # 4 bytes: link rev
163 163 # 4 bytes: parent 1 rev
164 164 # 4 bytes: parent 2 rev
165 165 # 32 bytes: nodeid
166 166 indexformatng = ">Qiiiiii20s12x"
167 167 ngshaoffset = 32
168 168 versionformat = ">I"
169 169
170 170 class revlogio(object):
171 171 def __init__(self):
172 172 self.size = struct.calcsize(indexformatng)
173 173
174 174 def parseindex(self, data, inline):
175 175 # call the C implementation to parse the index data
176 176 index, cache = parsers.parse_index2(data, inline)
177 177 return index, None, cache
178 178
179 179 def packentry(self, entry, node, version, rev):
180 180 p = _pack(indexformatng, *entry)
181 181 if rev == 0:
182 182 p = _pack(versionformat, version) + p[4:]
183 183 return p
184 184
185 185 class revlog(object):
186 186 """
187 187 the underlying revision storage object
188 188
189 189 A revlog consists of two parts, an index and the revision data.
190 190
191 191 The index is a file with a fixed record size containing
192 192 information on each revision, including its nodeid (hash), the
193 193 nodeids of its parents, the position and offset of its data within
194 194 the data file, and the revision it's based on. Finally, each entry
195 195 contains a linkrev entry that can serve as a pointer to external
196 196 data.
197 197
198 198 The revision data itself is a linear collection of data chunks.
199 199 Each chunk represents a revision and is usually represented as a
200 200 delta against the previous chunk. To bound lookup time, runs of
201 201 deltas are limited to about 2 times the length of the original
202 202 version data. This makes retrieval of a version proportional to
203 203 its size, or O(1) relative to the number of revisions.
204 204
205 205 Both pieces of the revlog are written to in an append-only
206 206 fashion, which means we never need to rewrite a file to insert or
207 207 remove data, and can use some simple techniques to avoid the need
208 208 for locking while reading.
209 209 """
210 210 def __init__(self, opener, indexfile):
211 211 """
212 212 create a revlog object
213 213
214 214 opener is a function that abstracts the file opening operation
215 215 and can be used to implement COW semantics or the like.
216 216 """
217 217 self.indexfile = indexfile
218 218 self.datafile = indexfile[:-2] + ".d"
219 219 self.opener = opener
220 220 self._cache = None
221 221 self._basecache = (0, 0)
222 222 self._chunkcache = (0, '')
223 223 self.index = []
224 224 self._pcache = {}
225 225 self._nodecache = {nullid: nullrev}
226 226 self._nodepos = None
227 227
228 228 v = REVLOG_DEFAULT_VERSION
229 229 if hasattr(opener, 'options'):
230 230 if 'revlogv1' in opener.options:
231 231 if 'generaldelta' in opener.options:
232 232 v |= REVLOGGENERALDELTA
233 233 else:
234 234 v = 0
235 235
236 236 i = ''
237 237 self._initempty = True
238 238 try:
239 239 f = self.opener(self.indexfile)
240 240 i = f.read()
241 241 f.close()
242 242 if len(i) > 0:
243 243 v = struct.unpack(versionformat, i[:4])[0]
244 244 self._initempty = False
245 245 except IOError, inst:
246 246 if inst.errno != errno.ENOENT:
247 247 raise
248 248
249 249 self.version = v
250 250 self._inline = v & REVLOGNGINLINEDATA
251 251 self._generaldelta = v & REVLOGGENERALDELTA
252 252 flags = v & ~0xFFFF
253 253 fmt = v & 0xFFFF
254 254 if fmt == REVLOGV0 and flags:
255 255 raise RevlogError(_("index %s unknown flags %#04x for format v0")
256 256 % (self.indexfile, flags >> 16))
257 257 elif fmt == REVLOGNG and flags & ~REVLOGNG_FLAGS:
258 258 raise RevlogError(_("index %s unknown flags %#04x for revlogng")
259 259 % (self.indexfile, flags >> 16))
260 260 elif fmt > REVLOGNG:
261 261 raise RevlogError(_("index %s unknown format %d")
262 262 % (self.indexfile, fmt))
263 263
264 264 self._io = revlogio()
265 265 if self.version == REVLOGV0:
266 266 self._io = revlogoldio()
267 267 try:
268 268 d = self._io.parseindex(i, self._inline)
269 269 except (ValueError, IndexError):
270 270 raise RevlogError(_("index %s is corrupted") % (self.indexfile))
271 271 self.index, nodemap, self._chunkcache = d
272 272 if nodemap is not None:
273 273 self.nodemap = self._nodecache = nodemap
274 274 if not self._chunkcache:
275 275 self._chunkclear()
276 276
277 277 def tip(self):
278 278 return self.node(len(self.index) - 2)
279 279 def __len__(self):
280 280 return len(self.index) - 1
281 281 def __iter__(self):
282 282 for i in xrange(len(self)):
283 283 yield i
284 284
285 285 @util.propertycache
286 286 def nodemap(self):
287 287 self.rev(self.node(0))
288 288 return self._nodecache
289 289
290 290 def rev(self, node):
291 291 try:
292 292 return self._nodecache[node]
293 293 except KeyError:
294 294 n = self._nodecache
295 295 i = self.index
296 296 p = self._nodepos
297 297 if p is None:
298 298 p = len(i) - 2
299 299 for r in xrange(p, -1, -1):
300 300 v = i[r][7]
301 301 n[v] = r
302 302 if v == node:
303 303 self._nodepos = r - 1
304 304 return r
305 305 raise LookupError(node, self.indexfile, _('no node'))
306 306
307 307 def node(self, rev):
308 308 return self.index[rev][7]
309 309 def linkrev(self, rev):
310 310 return self.index[rev][4]
311 311 def parents(self, node):
312 312 i = self.index
313 313 d = i[self.rev(node)]
314 314 return i[d[5]][7], i[d[6]][7] # map revisions to nodes inline
315 315 def parentrevs(self, rev):
316 316 return self.index[rev][5:7]
317 317 def start(self, rev):
318 318 return int(self.index[rev][0] >> 16)
319 319 def end(self, rev):
320 320 return self.start(rev) + self.length(rev)
321 321 def length(self, rev):
322 322 return self.index[rev][1]
323 323 def base(self, rev):
324 324 return self.index[rev][3]
325 325 def chainbase(self, rev):
326 326 index = self.index
327 327 base = index[rev][3]
328 328 while base != rev:
329 329 rev = base
330 330 base = index[rev][3]
331 331 return base
332 332 def flags(self, rev):
333 333 return self.index[rev][0] & 0xFFFF
334 334 def rawsize(self, rev):
335 335 """return the length of the uncompressed text for a given revision"""
336 336 l = self.index[rev][2]
337 337 if l >= 0:
338 338 return l
339 339
340 340 t = self.revision(self.node(rev))
341 341 return len(t)
342 342 size = rawsize
343 343
344 344 def reachable(self, node, stop=None):
345 345 """return the set of all nodes ancestral to a given node, including
346 346 the node itself, stopping when stop is matched"""
347 347 reachable = set((node,))
348 348 visit = [node]
349 349 if stop:
350 350 stopn = self.rev(stop)
351 351 else:
352 352 stopn = 0
353 353 while visit:
354 354 n = visit.pop(0)
355 355 if n == stop:
356 356 continue
357 357 if n == nullid:
358 358 continue
359 359 for p in self.parents(n):
360 360 if self.rev(p) < stopn:
361 361 continue
362 362 if p not in reachable:
363 363 reachable.add(p)
364 364 visit.append(p)
365 365 return reachable
366 366
367 367 def ancestors(self, *revs):
368 368 """Generate the ancestors of 'revs' in reverse topological order.
369 369
370 370 Yield a sequence of revision numbers starting with the parents
371 371 of each revision in revs, i.e., each revision is *not* considered
372 372 an ancestor of itself. Results are in breadth-first order:
373 373 parents of each rev in revs, then parents of those, etc. Result
374 374 does not include the null revision."""
375 375 visit = list(revs)
376 376 seen = set([nullrev])
377 377 while visit:
378 378 for parent in self.parentrevs(visit.pop(0)):
379 379 if parent not in seen:
380 380 visit.append(parent)
381 381 seen.add(parent)
382 382 yield parent
383 383
384 384 def descendants(self, *revs):
385 385 """Generate the descendants of 'revs' in revision order.
386 386
387 387 Yield a sequence of revision numbers starting with a child of
388 388 some rev in revs, i.e., each revision is *not* considered a
389 389 descendant of itself. Results are ordered by revision number (a
390 390 topological sort)."""
391 391 first = min(revs)
392 392 if first == nullrev:
393 393 for i in self:
394 394 yield i
395 395 return
396 396
397 397 seen = set(revs)
398 398 for i in xrange(first + 1, len(self)):
399 399 for x in self.parentrevs(i):
400 400 if x != nullrev and x in seen:
401 401 seen.add(i)
402 402 yield i
403 403 break
404 404
405 405 def findcommonmissing(self, common=None, heads=None):
406 406 """Return a tuple of the ancestors of common and the ancestors of heads
407 407 that are not ancestors of common.
408 408
409 409 More specifically, the second element is a list of nodes N such that
410 410 every N satisfies the following constraints:
411 411
412 412 1. N is an ancestor of some node in 'heads'
413 413 2. N is not an ancestor of any node in 'common'
414 414
415 415 The list is sorted by revision number, meaning it is
416 416 topologically sorted.
417 417
418 418 'heads' and 'common' are both lists of node IDs. If heads is
419 419 not supplied, uses all of the revlog's heads. If common is not
420 420 supplied, uses nullid."""
421 421 if common is None:
422 422 common = [nullid]
423 423 if heads is None:
424 424 heads = self.heads()
425 425
426 426 common = [self.rev(n) for n in common]
427 427 heads = [self.rev(n) for n in heads]
428 428
429 429 # we want the ancestors, but inclusive
430 430 has = set(self.ancestors(*common))
431 431 has.add(nullrev)
432 432 has.update(common)
433 433
434 434 # take all ancestors from heads that aren't in has
435 435 missing = set()
436 436 visit = [r for r in heads if r not in has]
437 437 while visit:
438 438 r = visit.pop(0)
439 439 if r in missing:
440 440 continue
441 441 else:
442 442 missing.add(r)
443 443 for p in self.parentrevs(r):
444 444 if p not in has:
445 445 visit.append(p)
446 446 missing = list(missing)
447 447 missing.sort()
448 448 return has, [self.node(r) for r in missing]
449 449
450 450 def findmissing(self, common=None, heads=None):
451 451 """Return the ancestors of heads that are not ancestors of common.
452 452
453 453 More specifically, return a list of nodes N such that every N
454 454 satisfies the following constraints:
455 455
456 456 1. N is an ancestor of some node in 'heads'
457 457 2. N is not an ancestor of any node in 'common'
458 458
459 459 The list is sorted by revision number, meaning it is
460 460 topologically sorted.
461 461
462 462 'heads' and 'common' are both lists of node IDs. If heads is
463 463 not supplied, uses all of the revlog's heads. If common is not
464 464 supplied, uses nullid."""
465 465 _common, missing = self.findcommonmissing(common, heads)
466 466 return missing
467 467
468 468 def nodesbetween(self, roots=None, heads=None):
469 469 """Return a topological path from 'roots' to 'heads'.
470 470
471 471 Return a tuple (nodes, outroots, outheads) where 'nodes' is a
472 472 topologically sorted list of all nodes N that satisfy both of
473 473 these constraints:
474 474
475 475 1. N is a descendant of some node in 'roots'
476 476 2. N is an ancestor of some node in 'heads'
477 477
478 478 Every node is considered to be both a descendant and an ancestor
479 479 of itself, so every reachable node in 'roots' and 'heads' will be
480 480 included in 'nodes'.
481 481
482 482 'outroots' is the list of reachable nodes in 'roots', i.e., the
483 483 subset of 'roots' that is returned in 'nodes'. Likewise,
484 484 'outheads' is the subset of 'heads' that is also in 'nodes'.
485 485
486 486 'roots' and 'heads' are both lists of node IDs. If 'roots' is
487 487 unspecified, uses nullid as the only root. If 'heads' is
488 488 unspecified, uses list of all of the revlog's heads."""
489 489 nonodes = ([], [], [])
490 490 if roots is not None:
491 491 roots = list(roots)
492 492 if not roots:
493 493 return nonodes
494 494 lowestrev = min([self.rev(n) for n in roots])
495 495 else:
496 496 roots = [nullid] # Everybody's a descendent of nullid
497 497 lowestrev = nullrev
498 498 if (lowestrev == nullrev) and (heads is None):
499 499 # We want _all_ the nodes!
500 500 return ([self.node(r) for r in self], [nullid], list(self.heads()))
501 501 if heads is None:
502 502 # All nodes are ancestors, so the latest ancestor is the last
503 503 # node.
504 504 highestrev = len(self) - 1
505 505 # Set ancestors to None to signal that every node is an ancestor.
506 506 ancestors = None
507 507 # Set heads to an empty dictionary for later discovery of heads
508 508 heads = {}
509 509 else:
510 510 heads = list(heads)
511 511 if not heads:
512 512 return nonodes
513 513 ancestors = set()
514 514 # Turn heads into a dictionary so we can remove 'fake' heads.
515 515 # Also, later we will be using it to filter out the heads we can't
516 516 # find from roots.
517 517 heads = dict.fromkeys(heads, False)
518 518 # Start at the top and keep marking parents until we're done.
519 519 nodestotag = set(heads)
520 520 # Remember where the top was so we can use it as a limit later.
521 521 highestrev = max([self.rev(n) for n in nodestotag])
522 522 while nodestotag:
523 523 # grab a node to tag
524 524 n = nodestotag.pop()
525 525 # Never tag nullid
526 526 if n == nullid:
527 527 continue
528 528 # A node's revision number represents its place in a
529 529 # topologically sorted list of nodes.
530 530 r = self.rev(n)
531 531 if r >= lowestrev:
532 532 if n not in ancestors:
533 533 # If we are possibly a descendent of one of the roots
534 534 # and we haven't already been marked as an ancestor
535 535 ancestors.add(n) # Mark as ancestor
536 536 # Add non-nullid parents to list of nodes to tag.
537 537 nodestotag.update([p for p in self.parents(n) if
538 538 p != nullid])
539 539 elif n in heads: # We've seen it before, is it a fake head?
540 540 # So it is, real heads should not be the ancestors of
541 541 # any other heads.
542 542 heads.pop(n)
543 543 if not ancestors:
544 544 return nonodes
545 545 # Now that we have our set of ancestors, we want to remove any
546 546 # roots that are not ancestors.
547 547
548 548 # If one of the roots was nullid, everything is included anyway.
549 549 if lowestrev > nullrev:
550 550 # But, since we weren't, let's recompute the lowest rev to not
551 551 # include roots that aren't ancestors.
552 552
553 553 # Filter out roots that aren't ancestors of heads
554 554 roots = [n for n in roots if n in ancestors]
555 555 # Recompute the lowest revision
556 556 if roots:
557 557 lowestrev = min([self.rev(n) for n in roots])
558 558 else:
559 559 # No more roots? Return empty list
560 560 return nonodes
561 561 else:
562 562 # We are descending from nullid, and don't need to care about
563 563 # any other roots.
564 564 lowestrev = nullrev
565 565 roots = [nullid]
566 566 # Transform our roots list into a set.
567 567 descendents = set(roots)
568 568 # Also, keep the original roots so we can filter out roots that aren't
569 569 # 'real' roots (i.e. are descended from other roots).
570 570 roots = descendents.copy()
571 571 # Our topologically sorted list of output nodes.
572 572 orderedout = []
573 573 # Don't start at nullid since we don't want nullid in our output list,
574 574 # and if nullid shows up in descedents, empty parents will look like
575 575 # they're descendents.
576 576 for r in xrange(max(lowestrev, 0), highestrev + 1):
577 577 n = self.node(r)
578 578 isdescendent = False
579 579 if lowestrev == nullrev: # Everybody is a descendent of nullid
580 580 isdescendent = True
581 581 elif n in descendents:
582 582 # n is already a descendent
583 583 isdescendent = True
584 584 # This check only needs to be done here because all the roots
585 585 # will start being marked is descendents before the loop.
586 586 if n in roots:
587 587 # If n was a root, check if it's a 'real' root.
588 588 p = tuple(self.parents(n))
589 589 # If any of its parents are descendents, it's not a root.
590 590 if (p[0] in descendents) or (p[1] in descendents):
591 591 roots.remove(n)
592 592 else:
593 593 p = tuple(self.parents(n))
594 594 # A node is a descendent if either of its parents are
595 595 # descendents. (We seeded the dependents list with the roots
596 596 # up there, remember?)
597 597 if (p[0] in descendents) or (p[1] in descendents):
598 598 descendents.add(n)
599 599 isdescendent = True
600 600 if isdescendent and ((ancestors is None) or (n in ancestors)):
601 601 # Only include nodes that are both descendents and ancestors.
602 602 orderedout.append(n)
603 603 if (ancestors is not None) and (n in heads):
604 604 # We're trying to figure out which heads are reachable
605 605 # from roots.
606 606 # Mark this head as having been reached
607 607 heads[n] = True
608 608 elif ancestors is None:
609 609 # Otherwise, we're trying to discover the heads.
610 610 # Assume this is a head because if it isn't, the next step
611 611 # will eventually remove it.
612 612 heads[n] = True
613 613 # But, obviously its parents aren't.
614 614 for p in self.parents(n):
615 615 heads.pop(p, None)
616 616 heads = [n for n, flag in heads.iteritems() if flag]
617 617 roots = list(roots)
618 618 assert orderedout
619 619 assert roots
620 620 assert heads
621 621 return (orderedout, roots, heads)
622 622
623 623 def headrevs(self):
624 624 count = len(self)
625 625 if not count:
626 626 return [nullrev]
627 627 ishead = [1] * (count + 1)
628 628 index = self.index
629 629 for r in xrange(count):
630 630 e = index[r]
631 631 ishead[e[5]] = ishead[e[6]] = 0
632 632 return [r for r in xrange(count) if ishead[r]]
633 633
634 634 def heads(self, start=None, stop=None):
635 635 """return the list of all nodes that have no children
636 636
637 637 if start is specified, only heads that are descendants of
638 638 start will be returned
639 639 if stop is specified, it will consider all the revs from stop
640 640 as if they had no children
641 641 """
642 642 if start is None and stop is None:
643 643 if not len(self):
644 644 return [nullid]
645 645 return [self.node(r) for r in self.headrevs()]
646 646
647 647 if start is None:
648 648 start = nullid
649 649 if stop is None:
650 650 stop = []
651 651 stoprevs = set([self.rev(n) for n in stop])
652 652 startrev = self.rev(start)
653 653 reachable = set((startrev,))
654 654 heads = set((startrev,))
655 655
656 656 parentrevs = self.parentrevs
657 657 for r in xrange(startrev + 1, len(self)):
658 658 for p in parentrevs(r):
659 659 if p in reachable:
660 660 if r not in stoprevs:
661 661 reachable.add(r)
662 662 heads.add(r)
663 663 if p in heads and p not in stoprevs:
664 664 heads.remove(p)
665 665
666 666 return [self.node(r) for r in heads]
667 667
668 668 def children(self, node):
669 669 """find the children of a given node"""
670 670 c = []
671 671 p = self.rev(node)
672 672 for r in range(p + 1, len(self)):
673 673 prevs = [pr for pr in self.parentrevs(r) if pr != nullrev]
674 674 if prevs:
675 675 for pr in prevs:
676 676 if pr == p:
677 677 c.append(self.node(r))
678 678 elif p == nullrev:
679 679 c.append(self.node(r))
680 680 return c
681 681
682 682 def descendant(self, start, end):
683 683 if start == nullrev:
684 684 return True
685 685 for i in self.descendants(start):
686 686 if i == end:
687 687 return True
688 688 elif i > end:
689 689 break
690 690 return False
691 691
692 692 def ancestor(self, a, b):
693 693 """calculate the least common ancestor of nodes a and b"""
694 694
695 695 # fast path, check if it is a descendant
696 696 a, b = self.rev(a), self.rev(b)
697 697 start, end = sorted((a, b))
698 698 if self.descendant(start, end):
699 699 return self.node(start)
700 700
701 701 def parents(rev):
702 702 return [p for p in self.parentrevs(rev) if p != nullrev]
703 703
704 704 c = ancestor.ancestor(a, b, parents)
705 705 if c is None:
706 706 return nullid
707 707
708 708 return self.node(c)
709 709
    def _match(self, id):
        """Resolve *id* to a binary node using the exact interpretations.

        Tried in order: integer rev, 20-byte binary node, string form of
        a rev number (negative counts from the end), full 40-char hex
        node.  Returns the binary node, or falls through (returning
        None) when no exact interpretation matches.
        """
        if isinstance(id, (long, int)):
            # rev
            return self.node(id)
        if len(id) == 20:
            # possibly a binary node
            # odds of a binary node being all hex in ASCII are 1 in 10**25
            try:
                node = id
                self.rev(node) # quick search the index
                return node
            except LookupError:
                pass # may be partial hex id
        try:
            # str(rev)
            rev = int(id)
            if str(rev) != id:
                # e.g. "05" or "1.0": not a plain rev number
                raise ValueError
            if rev < 0:
                # negative revs index from the end of the revlog
                rev = len(self) + rev
            if rev < 0 or rev >= len(self):
                raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            pass
        if len(id) == 40:
            try:
                # a full hex nodeid?
                node = bin(id)
                self.rev(node)
                return node
            except (TypeError, LookupError):
                pass
743 743
    def _partialmatch(self, id):
        """Resolve *id* as a hex node prefix.

        Returns the binary node when the prefix matches exactly one
        node, None when it matches nothing (or is not valid hex), and
        raises LookupError when it is ambiguous.  Successful lookups
        are memoized in self._pcache.
        """
        if id in self._pcache:
            return self._pcache[id]

        if len(id) < 40:
            try:
                # hex(node)[:...]
                l = len(id) // 2 # grab an even number of digits
                prefix = bin(id[:l * 2])
                # index entry slot 7 holds the binary node
                nl = [e[7] for e in self.index if e[7].startswith(prefix)]
                # re-filter on the full (possibly odd-length) hex id
                nl = [n for n in nl if hex(n).startswith(id)]
                if len(nl) > 0:
                    if len(nl) == 1:
                        self._pcache[id] = nl[0]
                        return nl[0]
                    raise LookupError(id, self.indexfile,
                                      _('ambiguous identifier'))
                return None
            except TypeError:
                # bin() rejected id: not valid hexadecimal
                pass
764 764
765 765 def lookup(self, id):
766 766 """locate a node based on:
767 767 - revision number or str(revision number)
768 768 - nodeid or subset of hex nodeid
769 769 """
770 770 n = self._match(id)
771 771 if n is not None:
772 772 return n
773 773 n = self._partialmatch(id)
774 774 if n:
775 775 return n
776 776
777 777 raise LookupError(id, self.indexfile, _('no match found'))
778 778
779 779 def cmp(self, node, text):
780 780 """compare text with a given file revision
781 781
782 782 returns True if text is different than what is stored.
783 783 """
784 784 p1, p2 = self.parents(node)
785 785 return hash(text, p1, p2) != node
786 786
787 787 def _addchunk(self, offset, data):
788 788 o, d = self._chunkcache
789 789 # try to add to existing cache
790 790 if o + len(d) == offset and len(d) + len(data) < _chunksize:
791 791 self._chunkcache = o, d + data
792 792 else:
793 793 self._chunkcache = offset, data
794 794
795 795 def _loadchunk(self, offset, length):
796 796 if self._inline:
797 797 df = self.opener(self.indexfile)
798 798 else:
799 799 df = self.opener(self.datafile)
800 800
801 801 readahead = max(65536, length)
802 802 df.seek(offset)
803 803 d = df.read(readahead)
804 804 self._addchunk(offset, d)
805 805 if readahead > length:
806 806 return d[:length]
807 807 return d
808 808
809 809 def _getchunk(self, offset, length):
810 810 o, d = self._chunkcache
811 811 l = len(d)
812 812
813 813 # is it in the cache?
814 814 cachestart = offset - o
815 815 cacheend = cachestart + length
816 816 if cachestart >= 0 and cacheend <= l:
817 817 if cachestart == 0 and cacheend == l:
818 818 return d # avoid a copy
819 819 return d[cachestart:cacheend]
820 820
821 821 return self._loadchunk(offset, length)
822 822
823 823 def _chunkraw(self, startrev, endrev):
824 824 start = self.start(startrev)
825 825 length = self.end(endrev) - start
826 826 if self._inline:
827 827 start += (startrev + 1) * self._io.size
828 828 return self._getchunk(start, length)
829 829
830 830 def _chunk(self, rev):
831 831 return decompress(self._chunkraw(rev, rev))
832 832
    def _chunkbase(self, rev):
        # fetch the chunk for the base of a delta chain; kept separate
        # from _chunk, presumably so subclasses can override how base
        # texts are obtained -- TODO confirm against subclasses
        return self._chunk(rev)
835 835
    def _chunkclear(self):
        # reset the chunk cache to an empty window at offset 0
        self._chunkcache = (0, '')
838 838
839 839 def deltaparent(self, rev):
840 840 """return deltaparent of the given revision"""
841 841 base = self.index[rev][3]
842 842 if base == rev:
843 843 return nullrev
844 844 elif self._generaldelta:
845 845 return base
846 846 else:
847 847 return rev - 1
848 848
849 849 def revdiff(self, rev1, rev2):
850 850 """return or calculate a delta between two revisions"""
851 851 if rev1 != nullrev and self.deltaparent(rev2) == rev1:
852 852 return self._chunk(rev2)
853 853
854 854 return mdiff.textdiff(self.revision(self.node(rev1)),
855 855 self.revision(self.node(rev2)))
856 856
    def revision(self, node):
        """return an uncompressed revision of a given node"""
        cachedrev = None
        if node == nullid:
            return ""
        if self._cache:
            if self._cache[0] == node:
                # exact full-text cache hit
                return self._cache[2]
            cachedrev = self._cache[1]

        # look up what we need to read
        text = None
        rev = self.rev(node)

        # check rev flags
        if self.flags(rev) & ~REVIDX_KNOWN_FLAGS:
            raise RevlogError(_('incompatible revision flag %x') %
                              (self.flags(rev) & ~REVIDX_KNOWN_FLAGS))

        # build delta chain
        chain = []
        index = self.index # for performance
        generaldelta = self._generaldelta
        iterrev = rev
        e = index[iterrev]
        # walk back until we reach a full snapshot (an entry whose base
        # field is itself) or the cached revision, collecting the revs
        # whose deltas must be applied
        while iterrev != e[3] and iterrev != cachedrev:
            chain.append(iterrev)
            if generaldelta:
                # base field (e[3]) is the delta parent
                iterrev = e[3]
            else:
                # classic revlogs delta against the previous rev
                iterrev -= 1
            e = index[iterrev]
        chain.reverse()
        base = iterrev

        if iterrev == cachedrev:
            # cache hit
            text = self._cache[2]

        # drop cache to save memory
        self._cache = None

        # warm the chunk cache with one read covering base..rev
        self._chunkraw(base, rev)
        if text is None:
            text = self._chunkbase(base)

        # apply the deltas in order on top of the base text
        bins = [self._chunk(r) for r in chain]
        text = mdiff.patches(text, bins)

        # verify integrity against the node hash before caching
        text = self._checkhash(text, node, rev)

        self._cache = (node, rev, text)
        return text
910 910
911 911 def _checkhash(self, text, node, rev):
912 912 p1, p2 = self.parents(node)
913 913 if node != hash(text, p1, p2):
914 914 raise RevlogError(_("integrity check failed on %s:%d")
915 915 % (self.indexfile, rev))
916 916 return text
917 917
    def checkinlinesize(self, tr, fp=None):
        """Migrate an inline revlog to separate index and data files
        once its data grows past _maxinline.

        tr is the current transaction; fp is an optional open file on
        the index, flushed and closed before the index is rewritten.
        No-op when the revlog is not inline or still small enough.
        """
        if not self._inline or (self.start(-2) + self.length(-2)) < _maxinline:
            return

        trinfo = tr.find(self.indexfile)
        if trinfo is None:
            raise RevlogError(_("%s not found in the transaction")
                              % self.indexfile)

        trindex = trinfo[2]
        dataoff = self.start(trindex)

        # register the new data file with the transaction for rollback
        tr.add(self.datafile, dataoff)

        if fp:
            fp.flush()
            fp.close()

        # copy every revision's raw chunk out to the new data file
        df = self.opener(self.datafile, 'w')
        try:
            for r in self:
                df.write(self._chunkraw(r, r))
        finally:
            df.close()

        # rewrite the index without the inline flag or inline data
        fp = self.opener(self.indexfile, 'w', atomictemp=True)
        self.version &= ~(REVLOGNGINLINEDATA)
        self._inline = False
        for i in self:
            e = self._io.packentry(self.index[i], self.node, self.version, i)
            fp.write(e)

        # if we don't call rename, the temp file will never replace the
        # real index
        fp.rename()

        tr.replace(self.indexfile, trindex * self._io.size)
        # cached chunk offsets are stale after the migration
        self._chunkclear()
956 956
957 957 def addrevision(self, text, transaction, link, p1, p2, cachedelta=None):
958 958 """add a revision to the log
959 959
960 960 text - the revision data to add
961 961 transaction - the transaction object used for rollback
962 962 link - the linkrev data to add
963 963 p1, p2 - the parent nodeids of the revision
964 964 cachedelta - an optional precomputed delta
965 965 """
966 966 node = hash(text, p1, p2)
967 967 if node in self.nodemap:
968 968 return node
969 969
970 970 dfh = None
971 971 if not self._inline:
972 972 dfh = self.opener(self.datafile, "a")
973 973 ifh = self.opener(self.indexfile, "a+")
974 974 try:
975 975 return self._addrevision(node, text, transaction, link, p1, p2,
976 976 cachedelta, ifh, dfh)
977 977 finally:
978 978 if dfh:
979 979 dfh.close()
980 980 ifh.close()
981 981
    def _addrevision(self, node, text, transaction, link, p1, p2,
                     cachedelta, ifh, dfh):
        """internal function to add revisions to the log

        see addrevision for argument descriptions.
        invariants:
        - text is optional (can be None); if not set, cachedelta must be set.
          if both are set, they must correspond to eachother.

        ifh/dfh are open file handles on the index and data files (dfh
        is None for inline revlogs).  Returns node.
        """
        # one-element list so the nested functions below can rebind the
        # lazily reconstructed full text
        btext = [text]
        def buildtext():
            # return the full text, rebuilding it from cachedelta when
            # it was not supplied
            if btext[0] is not None:
                return btext[0]
            # flush any pending writes here so we can read it in revision
            if dfh:
                dfh.flush()
            ifh.flush()
            basetext = self.revision(self.node(cachedelta[0]))
            btext[0] = mdiff.patch(basetext, cachedelta[1])
            # the rebuilt text must hash to the advertised node
            chk = hash(btext[0], p1, p2)
            if chk != node:
                raise RevlogError(_("consistency error in delta"))
            return btext[0]

        def builddelta(rev):
            # build a compressed delta against rev; returns
            # (dist, length, data, base, chainbase)
            # can we use the cached delta?
            if cachedelta and cachedelta[0] == rev:
                delta = cachedelta[1]
            else:
                t = buildtext()
                ptext = self.revision(self.node(rev))
                delta = mdiff.textdiff(ptext, t)
            data = compress(delta)
            l = len(data[1]) + len(data[0])
            if basecache[0] == rev:
                chainbase = basecache[1]
            else:
                chainbase = self.chainbase(rev)
            # bytes that must be read to reconstruct this text
            dist = l + offset - self.start(chainbase)
            if self._generaldelta:
                base = rev
            else:
                base = chainbase
            return dist, l, data, base, chainbase

        curr = len(self)
        prev = curr - 1
        base = chainbase = curr
        offset = self.end(prev)
        flags = 0
        d = None
        basecache = self._basecache
        p1r, p2r = self.rev(p1), self.rev(p2)

        # should we try to build a delta?
        if prev != nullrev:
            if self._generaldelta:
                # prefer deltaing against a parent, p1 first, when it is
                # at least as recent as the current chain base
                if p1r >= basecache[1]:
                    d = builddelta(p1r)
                elif p2r >= basecache[1]:
                    d = builddelta(p2r)
                else:
                    d = builddelta(prev)
            else:
                d = builddelta(prev)
            dist, l, data, base, chainbase = d

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if text is None:
            textlen = mdiff.patchedsize(self.rawsize(cachedelta[0]),
                                        cachedelta[1])
        else:
            textlen = len(text)
        if d is None or dist > textlen * 2:
            # store a full snapshot instead of a delta
            text = buildtext()
            data = compress(text)
            l = len(data[1]) + len(data[0])
            base = chainbase = curr

        e = (offset_type(offset, flags), l, textlen,
             base, link, p1r, p2r, node)
        # insert before the sentinel entry at the end of the index
        self.index.insert(-1, e)
        self.nodemap[node] = curr

        entry = self._io.packentry(e, self.node, self.version, curr)
        if not self._inline:
            transaction.add(self.datafile, offset)
            transaction.add(self.indexfile, curr * len(entry))
            if data[0]:
                dfh.write(data[0])
            dfh.write(data[1])
            dfh.flush()
            ifh.write(entry)
        else:
            # inline: index entries and revision data interleave in the
            # index file
            offset += curr * self._io.size
            transaction.add(self.indexfile, offset, curr)
            ifh.write(entry)
            ifh.write(data[0])
            ifh.write(data[1])
            # the write may have pushed the revlog past the inline limit
            self.checkinlinesize(transaction, ifh)

        if type(text) == str: # only accept immutable objects
            self._cache = (node, curr, text)
        self._basecache = (curr, chainbase)
        return node
1088 1088
1089 def group(self, nodelist, bundler):
1089 def group(self, nodelist, bundler, reorder=None):
1090 1090 """Calculate a delta group, yielding a sequence of changegroup chunks
1091 1091 (strings).
1092 1092
1093 1093 Given a list of changeset revs, return a set of deltas and
1094 1094 metadata corresponding to nodes. The first delta is
1095 1095 first parent(nodelist[0]) -> nodelist[0], the receiver is
1096 1096 guaranteed to have this parent as it has all history before
1097 1097 these changesets. In the case firstparent is nullrev the
1098 1098 changegroup starts with a full revision.
1099 1099 """
1100 1100
1101 # for generaldelta revlogs, we linearize the revs; this will both be
1102 # much quicker and generate a much smaller bundle
1103 if (self._generaldelta and reorder is not False) or reorder:
1104 dag = dagutil.revlogdag(self)
1105 revs = set(self.rev(n) for n in nodelist)
1106 revs = dag.linearize(revs)
1107 else:
1101 1108 revs = sorted([self.rev(n) for n in nodelist])
1102 1109
1103 1110 # if we don't have any revisions touched by these changesets, bail
1104 1111 if not revs:
1105 1112 yield bundler.close()
1106 1113 return
1107 1114
1108 1115 # add the parent of the first rev
1109 1116 p = self.parentrevs(revs[0])[0]
1110 1117 revs.insert(0, p)
1111 1118
1112 1119 # build deltas
1113 1120 for r in xrange(len(revs) - 1):
1114 1121 prev, curr = revs[r], revs[r + 1]
1115 1122 for c in bundler.revchunk(self, curr, prev):
1116 1123 yield c
1117 1124
1118 1125 yield bundler.close()
1119 1126
    def addgroup(self, bundle, linkmapper, transaction):
        """
        add a delta group

        given a set of deltas, add them to the revision log. the
        first delta is against its parent, which should be in our
        log, the rest are against the previous delta.

        bundle supplies deltachunk(chain) dicts; linkmapper maps a
        changeset node to the linkrev for this revlog.  Returns the
        node of the last chunk processed (None if the bundle was empty).
        """

        # track the base of the current delta log
        node = None

        r = len(self)
        end = 0
        if r:
            end = self.end(r - 1)
        ifh = self.opener(self.indexfile, "a+")
        isize = r * self._io.size
        if self._inline:
            # inline: entries and data share the index file
            transaction.add(self.indexfile, end + isize, r)
            dfh = None
        else:
            transaction.add(self.indexfile, isize, r)
            transaction.add(self.datafile, end)
            dfh = self.opener(self.datafile, "a")

        try:
            # loop through our set of deltas
            chain = None
            while 1:
                chunkdata = bundle.deltachunk(chain)
                if not chunkdata:
                    break
                node = chunkdata['node']
                p1 = chunkdata['p1']
                p2 = chunkdata['p2']
                cs = chunkdata['cs']
                deltabase = chunkdata['deltabase']
                delta = chunkdata['delta']

                link = linkmapper(cs)
                if node in self.nodemap:
                    # this can happen if two branches make the same change
                    chain = node
                    continue

                # both parents must already be known to this revlog
                for p in (p1, p2):
                    if not p in self.nodemap:
                        raise LookupError(p, self.indexfile,
                                          _('unknown parent'))

                if deltabase not in self.nodemap:
                    raise LookupError(deltabase, self.indexfile,
                                      _('unknown delta base'))

                baserev = self.rev(deltabase)
                chain = self._addrevision(node, None, transaction, link,
                                          p1, p2, (baserev, delta), ifh, dfh)
                if not dfh and not self._inline:
                    # addrevision switched from inline to conventional
                    # reopen the index
                    ifh.close()
                    dfh = self.opener(self.datafile, "a")
                    ifh = self.opener(self.indexfile, "a")
        finally:
            if dfh:
                dfh.close()
            ifh.close()

        return node
1190 1197
    def strip(self, minlink, transaction):
        """truncate the revlog on the first revision with a linkrev >= minlink

        This function is called when we're stripping revision minlink and
        its descendants from the repository.

        We have to remove all revisions with linkrev >= minlink, because
        the equivalent changelog revisions will be renumbered after the
        strip.

        So we truncate the revlog on the first of these revisions, and
        trust that the caller has saved the revisions that shouldn't be
        removed and that it'll readd them after this truncation.
        """
        if len(self) == 0:
            return

        # find the first rev to strip; index entry slot 4 is the linkrev
        for rev in self:
            if self.index[rev][4] >= minlink:
                break
        else:
            # no revision reaches minlink: nothing to strip
            return

        # first truncate the files on disk
        end = self.start(rev)
        if not self._inline:
            transaction.add(self.datafile, end)
            end = rev * self._io.size
        else:
            # inline: index entries and revision data share one file
            end += rev * self._io.size

        transaction.add(self.indexfile, end)

        # then reset internal state in memory to forget those revisions
        self._cache = None
        self._chunkclear()
        for x in xrange(rev, len(self)):
            del self.nodemap[self.node(x)]

        # the -1 preserves the trailing entry of the index -- presumably
        # the sentinel that _addrevision inserts before (confirm)
        del self.index[rev:-1]
1231 1238
    def checksize(self):
        """Compare on-disk file sizes with what the index implies.

        Returns (dd, di): the difference between the actual and expected
        sizes of the data file and index file respectively (0 when
        consistent; a missing file is treated as consistent).
        """
        expected = 0
        if len(self):
            expected = max(0, self.end(len(self) - 1))

        try:
            f = self.opener(self.datafile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            dd = actual - expected
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            # no data file at all counts as consistent
            dd = 0

        try:
            f = self.opener(self.indexfile)
            f.seek(0, 2)
            actual = f.tell()
            f.close()
            s = self._io.size
            # bytes beyond a whole number of index entries
            i = max(0, actual // s)
            di = actual - (i * s)
            if self._inline:
                # inline: the index holds entries plus revision data
                databytes = 0
                for r in self:
                    databytes += max(0, self.length(r))
                dd = 0
                di = actual - len(self) * s - databytes
        except IOError, inst:
            if inst.errno != errno.ENOENT:
                raise
            di = 0

        return (dd, di)
1268 1275
1269 1276 def files(self):
1270 1277 res = [self.indexfile]
1271 1278 if not self._inline:
1272 1279 res.append(self.datafile)
1273 1280 return res
General Comments 0
You need to be logged in to leave comments. Login now