Implement data inlined with the index file...
mason@suse.com
r2073:1e6745f7 default
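In outline: a revlog normally keeps its index in a ".i" file and revision
data in a separate ".d" file; the new inline flag lets a revlog interleave
revision data with the index entries in the ".i" file itself. This commit
wires the flag through localrepository: a format number and flag names are
read from the [revlog] section of .hgrc and folded into a single version
word that is passed to the changelog, manifest and filelogs. A minimal,
self-contained sketch of that computation (the "inline" flag name, the
constant names and the bit values are assumptions for illustration, not
necessarily Mercurial's actual values):

    REVLOGNG = 1                   # assumed format number of the new index format
    REVLOG_INLINE_DATA = 1 << 16   # assumed bit for "data inlined with the index"

    def flagstr(name):
        # stand-in for revlog.flagstr as used in the hunk below
        if name == "inline":
            return REVLOG_INLINE_DATA
        raise ValueError("unknown revlog flag %r" % name)

    # as if read from .hgrc:  [revlog] format = 1, flags = inline
    conf = {"format": "1", "flags": "inline"}

    version = int(conf.get("format", 0))
    flags = 0
    for x in conf.get("flags", "").split():
        flags |= flagstr(x)
    version |= flags
    assert version == REVLOGNG | REVLOG_INLINE_DATA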
@@ -1,1963 +1,1974 @@
1 1 # localrepo.py - read/write repository class for mercurial
2 2 #
3 3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 4 #
5 5 # This software may be used and distributed according to the terms
6 6 # of the GNU General Public License, incorporated herein by reference.
7 7
8 8 import os, util
9 9 import filelog, manifest, changelog, dirstate, repo
10 10 from node import *
11 11 from i18n import gettext as _
12 12 from demandload import *
13 13 demandload(globals(), "appendfile changegroup")
14 14 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui revlog")
15 15
16 16 class localrepository(object):
17 17 def __del__(self):
18 18 self.transhandle = None
19 19 def __init__(self, parentui, path=None, create=0):
20 20 if not path:
21 21 p = os.getcwd()
22 22 while not os.path.isdir(os.path.join(p, ".hg")):
23 23 oldp = p
24 24 p = os.path.dirname(p)
25 25 if p == oldp:
26 26 raise repo.RepoError(_("no repo found"))
27 27 path = p
28 28 self.path = os.path.join(path, ".hg")
29 29
30 30 if not create and not os.path.isdir(self.path):
31 31 raise repo.RepoError(_("repository %s not found") % path)
32 32
33 33 self.root = os.path.abspath(path)
34 34 self.origroot = path
35 35 self.ui = ui.ui(parentui=parentui)
36 36 self.opener = util.opener(self.path)
37 37 self.wopener = util.opener(self.root)
38 38
39 39 try:
40 40 self.ui.readconfig(self.join("hgrc"), self.root)
41 41 except IOError:
42 42 pass
43 43
44 44 v = self.ui.revlogopts
45 45 self.revlogversion = int(v.get('format', 0))
46 flags = 0
46 47 for x in v.get('flags', "").split():
47 self.revlogversion |= revlog.flagstr(x)
48 flags |= revlog.flagstr(x)
49
50 v = self.revlogversion | flags
51 self.manifest = manifest.manifest(self.opener, v)
52 self.changelog = changelog.changelog(self.opener, v)
48 53
49 self.manifest = manifest.manifest(self.opener, self.revlogversion)
50 self.changelog = changelog.changelog(self.opener, self.revlogversion)
51 self.revlogversion = self.changelog.version
54 # the changelog might not have the inline index flag
55 # on. If the format of the changelog is the same as found in
56 # .hgrc, apply any flags found in the .hgrc as well.
57         # Otherwise, just use the version from the changelog.
58 v = self.changelog.version
59 if v == self.revlogversion:
60 v |= flags
61 self.revlogversion = v
62
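        # Worked example (values assumed for illustration): with
        # "format=1, flags=inline" in .hgrc, an existing changelog that
        # reports plain version 1 equals self.revlogversion, so the
        # inline flag from .hgrc is OR'ed back in and newly created
        # revlogs are inline; a changelog reporting version 0 instead
        # wins outright and the .hgrc flags are ignored.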
52 63 self.tagscache = None
53 64 self.nodetagscache = None
54 65 self.encodepats = None
55 66 self.decodepats = None
56 67 self.transhandle = None
57 68
58 69 if create:
59 70 os.mkdir(self.path)
60 71 os.mkdir(self.join("data"))
61 72
62 73 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
63 74 def hook(self, name, throw=False, **args):
64 75 def runhook(name, cmd):
65 76 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
66 77 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()] +
67 78 [(k.upper(), v) for k, v in args.iteritems()])
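                # e.g. (illustrative) args={'node': '6fd8...'} is exposed
                # to the hook's environment as both HG_NODE and NODE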
68 79 r = util.system(cmd, environ=env, cwd=self.root)
69 80 if r:
70 81 desc, r = util.explain_exit(r)
71 82 if throw:
72 83 raise util.Abort(_('%s hook %s') % (name, desc))
73 84 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
74 85 return False
75 86 return True
76 87
77 88 r = True
78 89 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
79 90 if hname.split(".", 1)[0] == name and cmd]
80 91 hooks.sort()
81 92 for hname, cmd in hooks:
82 93 r = runhook(hname, cmd) and r
83 94 return r
84 95
85 96 def tags(self):
86 97 '''return a mapping of tag to node'''
87 98 if not self.tagscache:
88 99 self.tagscache = {}
89 100
90 101 def parsetag(line, context):
91 102 if not line:
92 103 return
93 104                 s = line.split(" ", 1)
94 105 if len(s) != 2:
95 106 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
96 107 return
97 108 node, key = s
98 109 try:
99 110 bin_n = bin(node)
100 111 except TypeError:
101 112 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
102 113 return
103 114 if bin_n not in self.changelog.nodemap:
104 115 self.ui.warn(_("%s: ignoring invalid tag\n") % context)
105 116 return
106 117 self.tagscache[key.strip()] = bin_n
107 118
108 119 # read each head of the tags file, ending with the tip
109 120 # and add each tag found to the map, with "newer" ones
110 121 # taking precedence
111 122 fl = self.file(".hgtags")
112 123 h = fl.heads()
113 124 h.reverse()
114 125 for r in h:
115 126 count = 0
116 127 for l in fl.read(r).splitlines():
117 128 count += 1
118 129 parsetag(l, ".hgtags:%d" % count)
119 130
120 131 try:
121 132 f = self.opener("localtags")
122 133 count = 0
123 134 for l in f:
124 135 count += 1
125 136 parsetag(l, "localtags:%d" % count)
126 137 except IOError:
127 138 pass
128 139
129 140 self.tagscache['tip'] = self.changelog.tip()
130 141
131 142 return self.tagscache
132 143
133 144 def tagslist(self):
134 145 '''return a list of tags ordered by revision'''
135 146 l = []
136 147 for t, n in self.tags().items():
137 148 try:
138 149 r = self.changelog.rev(n)
139 150 except:
140 151 r = -2 # sort to the beginning of the list if unknown
141 152 l.append((r, t, n))
142 153 l.sort()
143 154 return [(t, n) for r, t, n in l]
144 155
145 156 def nodetags(self, node):
146 157 '''return the tags associated with a node'''
147 158 if not self.nodetagscache:
148 159 self.nodetagscache = {}
149 160 for t, n in self.tags().items():
150 161 self.nodetagscache.setdefault(n, []).append(t)
151 162 return self.nodetagscache.get(node, [])
152 163
153 164 def lookup(self, key):
154 165 try:
155 166 return self.tags()[key]
156 167 except KeyError:
157 168 try:
158 169 return self.changelog.lookup(key)
159 170 except:
160 171 raise repo.RepoError(_("unknown revision '%s'") % key)
161 172
162 173 def dev(self):
163 174 return os.stat(self.path).st_dev
164 175
165 176 def local(self):
166 177 return True
167 178
168 179 def join(self, f):
169 180 return os.path.join(self.path, f)
170 181
171 182 def wjoin(self, f):
172 183 return os.path.join(self.root, f)
173 184
174 185 def file(self, f):
175 186 if f[0] == '/':
176 187 f = f[1:]
177 188 return filelog.filelog(self.opener, f, self.revlogversion)
178 189
179 190 def getcwd(self):
180 191 return self.dirstate.getcwd()
181 192
182 193 def wfile(self, f, mode='r'):
183 194 return self.wopener(f, mode)
184 195
185 196 def wread(self, filename):
186 197 if self.encodepats == None:
187 198 l = []
188 199 for pat, cmd in self.ui.configitems("encode"):
189 200 mf = util.matcher(self.root, "", [pat], [], [])[1]
190 201 l.append((mf, cmd))
191 202 self.encodepats = l
192 203
193 204 data = self.wopener(filename, 'r').read()
194 205
195 206 for mf, cmd in self.encodepats:
196 207 if mf(filename):
197 208 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
198 209 data = util.filter(data, cmd)
199 210 break
200 211
201 212 return data
202 213
203 214 def wwrite(self, filename, data, fd=None):
204 215 if self.decodepats == None:
205 216 l = []
206 217 for pat, cmd in self.ui.configitems("decode"):
207 218 mf = util.matcher(self.root, "", [pat], [], [])[1]
208 219 l.append((mf, cmd))
209 220 self.decodepats = l
210 221
211 222 for mf, cmd in self.decodepats:
212 223 if mf(filename):
213 224 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
214 225 data = util.filter(data, cmd)
215 226 break
216 227
217 228 if fd:
218 229 return fd.write(data)
219 230 return self.wopener(filename, 'w').write(data)
220 231
221 232 def transaction(self):
222 233 tr = self.transhandle
223 234 if tr != None and tr.running():
224 235 return tr.nest()
225 236
226 237 # save dirstate for undo
227 238 try:
228 239 ds = self.opener("dirstate").read()
229 240 except IOError:
230 241 ds = ""
231 242 self.opener("journal.dirstate", "w").write(ds)
232 243
233 244 tr = transaction.transaction(self.ui.warn, self.opener,
234 245 self.join("journal"),
235 246 aftertrans(self.path))
236 247 self.transhandle = tr
237 248 return tr
238 249
239 250 def recover(self):
240 251 l = self.lock()
241 252 if os.path.exists(self.join("journal")):
242 253 self.ui.status(_("rolling back interrupted transaction\n"))
243 254 transaction.rollback(self.opener, self.join("journal"))
244 255 self.reload()
245 256 return True
246 257 else:
247 258 self.ui.warn(_("no interrupted transaction available\n"))
248 259 return False
249 260
250 261 def undo(self, wlock=None):
251 262 if not wlock:
252 263 wlock = self.wlock()
253 264 l = self.lock()
254 265 if os.path.exists(self.join("undo")):
255 266 self.ui.status(_("rolling back last transaction\n"))
256 267 transaction.rollback(self.opener, self.join("undo"))
257 268 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
258 269 self.reload()
259 270 self.wreload()
260 271 else:
261 272 self.ui.warn(_("no undo information available\n"))
262 273
263 274 def wreload(self):
264 275 self.dirstate.read()
265 276
266 277 def reload(self):
267 278 self.changelog.load()
268 279 self.manifest.load()
269 280 self.tagscache = None
270 281 self.nodetagscache = None
271 282
272 283 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None,
273 284 desc=None):
274 285 try:
275 286 l = lock.lock(self.join(lockname), 0, releasefn, desc=desc)
276 287 except lock.LockHeld, inst:
277 288 if not wait:
278 289 raise
279 290 self.ui.warn(_("waiting for lock on %s held by %s\n") %
280 291 (desc, inst.args[0]))
281 292 # default to 600 seconds timeout
282 293 l = lock.lock(self.join(lockname),
283 294 int(self.ui.config("ui", "timeout") or 600),
284 295 releasefn, desc=desc)
285 296 if acquirefn:
286 297 acquirefn()
287 298 return l
288 299
289 300 def lock(self, wait=1):
290 301 return self.do_lock("lock", wait, acquirefn=self.reload,
291 302 desc=_('repository %s') % self.origroot)
292 303
293 304 def wlock(self, wait=1):
294 305 return self.do_lock("wlock", wait, self.dirstate.write,
295 306 self.wreload,
296 307 desc=_('working directory of %s') % self.origroot)
297 308
298 309 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
299 310 "determine whether a new filenode is needed"
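            # Illustrative outcomes: if fp2 is an ancestor of fp1 it adds
            # nothing over fp1 and is dropped (and vice versa); if the
            # remaining parent's text already equals 'text', the existing
            # filenode is reused instead of creating a new one.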
300 311 fp1 = manifest1.get(filename, nullid)
301 312 fp2 = manifest2.get(filename, nullid)
302 313
303 314 if fp2 != nullid:
304 315 # is one parent an ancestor of the other?
305 316 fpa = filelog.ancestor(fp1, fp2)
306 317 if fpa == fp1:
307 318 fp1, fp2 = fp2, nullid
308 319 elif fpa == fp2:
309 320 fp2 = nullid
310 321
311 322 # is the file unmodified from the parent? report existing entry
312 323 if fp2 == nullid and text == filelog.read(fp1):
313 324 return (fp1, None, None)
314 325
315 326 return (None, fp1, fp2)
316 327
317 328 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
318 329 orig_parent = self.dirstate.parents()[0] or nullid
319 330 p1 = p1 or self.dirstate.parents()[0] or nullid
320 331 p2 = p2 or self.dirstate.parents()[1] or nullid
321 332 c1 = self.changelog.read(p1)
322 333 c2 = self.changelog.read(p2)
323 334 m1 = self.manifest.read(c1[0])
324 335 mf1 = self.manifest.readflags(c1[0])
325 336 m2 = self.manifest.read(c2[0])
326 337 changed = []
327 338
328 339 if orig_parent == p1:
329 340 update_dirstate = 1
330 341 else:
331 342 update_dirstate = 0
332 343
333 344 if not wlock:
334 345 wlock = self.wlock()
335 346 l = self.lock()
336 347 tr = self.transaction()
337 348 mm = m1.copy()
338 349 mfm = mf1.copy()
339 350 linkrev = self.changelog.count()
340 351 for f in files:
341 352 try:
342 353 t = self.wread(f)
343 354 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
344 355 r = self.file(f)
345 356 mfm[f] = tm
346 357
347 358 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
348 359 if entry:
349 360 mm[f] = entry
350 361 continue
351 362
352 363 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
353 364 changed.append(f)
354 365 if update_dirstate:
355 366 self.dirstate.update([f], "n")
356 367 except IOError:
357 368 try:
358 369 del mm[f]
359 370 del mfm[f]
360 371 if update_dirstate:
361 372 self.dirstate.forget([f])
362 373 except:
363 374 # deleted from p2?
364 375 pass
365 376
366 377 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
367 378 user = user or self.ui.username()
368 379 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
369 380 tr.close()
370 381 if update_dirstate:
371 382 self.dirstate.setparents(n, nullid)
372 383
373 384 def commit(self, files=None, text="", user=None, date=None,
374 385 match=util.always, force=False, lock=None, wlock=None):
375 386 commit = []
376 387 remove = []
377 388 changed = []
378 389
379 390 if files:
380 391 for f in files:
381 392 s = self.dirstate.state(f)
382 393 if s in 'nmai':
383 394 commit.append(f)
384 395 elif s == 'r':
385 396 remove.append(f)
386 397 else:
387 398 self.ui.warn(_("%s not tracked!\n") % f)
388 399 else:
389 400 modified, added, removed, deleted, unknown = self.changes(match=match)
390 401 commit = modified + added
391 402 remove = removed
392 403
393 404 p1, p2 = self.dirstate.parents()
394 405 c1 = self.changelog.read(p1)
395 406 c2 = self.changelog.read(p2)
396 407 m1 = self.manifest.read(c1[0])
397 408 mf1 = self.manifest.readflags(c1[0])
398 409 m2 = self.manifest.read(c2[0])
399 410
400 411 if not commit and not remove and not force and p2 == nullid:
401 412 self.ui.status(_("nothing changed\n"))
402 413 return None
403 414
404 415 xp1 = hex(p1)
405 416 if p2 == nullid: xp2 = ''
406 417 else: xp2 = hex(p2)
407 418
408 419 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
409 420
410 421 if not wlock:
411 422 wlock = self.wlock()
412 423 if not lock:
413 424 lock = self.lock()
414 425 tr = self.transaction()
415 426
416 427 # check in files
417 428 new = {}
418 429 linkrev = self.changelog.count()
419 430 commit.sort()
420 431 for f in commit:
421 432 self.ui.note(f + "\n")
422 433 try:
423 434 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
424 435 t = self.wread(f)
425 436 except IOError:
426 437 self.ui.warn(_("trouble committing %s!\n") % f)
427 438 raise
428 439
429 440 r = self.file(f)
430 441
431 442 meta = {}
432 443 cp = self.dirstate.copied(f)
433 444 if cp:
434 445 meta["copy"] = cp
435 446 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
436 447 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
437 448 fp1, fp2 = nullid, nullid
438 449 else:
439 450 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
440 451 if entry:
441 452 new[f] = entry
442 453 continue
443 454
444 455 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
445 456 # remember what we've added so that we can later calculate
446 457 # the files to pull from a set of changesets
447 458 changed.append(f)
448 459
449 460 # update manifest
450 461 m1 = m1.copy()
451 462 m1.update(new)
452 463 for f in remove:
453 464 if f in m1:
454 465 del m1[f]
455 466 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
456 467 (new, remove))
457 468
458 469 # add changeset
459 470 new = new.keys()
460 471 new.sort()
461 472
462 473 user = user or self.ui.username()
463 474 if not text:
464 475 edittext = [""]
465 476 if p2 != nullid:
466 477 edittext.append("HG: branch merge")
467 478 edittext.extend(["HG: changed %s" % f for f in changed])
468 479 edittext.extend(["HG: removed %s" % f for f in remove])
469 480 if not changed and not remove:
470 481 edittext.append("HG: no files changed")
471 482 edittext.append("")
472 483 # run editor in the repository root
473 484 olddir = os.getcwd()
474 485 os.chdir(self.root)
475 486 edittext = self.ui.edit("\n".join(edittext), user)
476 487 os.chdir(olddir)
477 488 if not edittext.rstrip():
478 489 return None
479 490 text = edittext
480 491
481 492 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
482 493 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
483 494 parent2=xp2)
484 495 tr.close()
485 496
486 497 self.dirstate.setparents(n)
487 498 self.dirstate.update(new, "n")
488 499 self.dirstate.forget(remove)
489 500
490 501 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
491 502 return n
492 503
493 504 def walk(self, node=None, files=[], match=util.always, badmatch=None):
494 505 if node:
495 506 fdict = dict.fromkeys(files)
496 507 for fn in self.manifest.read(self.changelog.read(node)[0]):
497 508 fdict.pop(fn, None)
498 509 if match(fn):
499 510 yield 'm', fn
500 511 for fn in fdict:
501 512 if badmatch and badmatch(fn):
502 513 if match(fn):
503 514 yield 'b', fn
504 515 else:
505 516 self.ui.warn(_('%s: No such file in rev %s\n') % (
506 517 util.pathto(self.getcwd(), fn), short(node)))
507 518 else:
508 519 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
509 520 yield src, fn
510 521
511 522 def changes(self, node1=None, node2=None, files=[], match=util.always,
512 523 wlock=None, show_ignored=None):
513 524 """return changes between two nodes or node and working directory
514 525
515 526 If node1 is None, use the first dirstate parent instead.
516 527 If node2 is None, compare node1 with working directory.
517 528 """
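        # (illustrative) typical unpacking of the result:
        #   modified, added, removed, deleted, unknown = self.changes()
        # with a sixth list, ignored, appended when show_ignored is set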
518 529
519 530 def fcmp(fn, mf):
520 531 t1 = self.wread(fn)
521 532 t2 = self.file(fn).read(mf.get(fn, nullid))
522 533 return cmp(t1, t2)
523 534
524 535 def mfmatches(node):
525 536 change = self.changelog.read(node)
526 537 mf = dict(self.manifest.read(change[0]))
527 538 for fn in mf.keys():
528 539 if not match(fn):
529 540 del mf[fn]
530 541 return mf
531 542
532 543 if node1:
533 544 # read the manifest from node1 before the manifest from node2,
534 545 # so that we'll hit the manifest cache if we're going through
535 546 # all the revisions in parent->child order.
536 547 mf1 = mfmatches(node1)
537 548
538 549 # are we comparing the working directory?
539 550 if not node2:
540 551 if not wlock:
541 552 try:
542 553 wlock = self.wlock(wait=0)
543 554 except lock.LockException:
544 555 wlock = None
545 556 lookup, modified, added, removed, deleted, unknown, ignored = (
546 557 self.dirstate.changes(files, match, show_ignored))
547 558
548 559 # are we comparing working dir against its parent?
549 560 if not node1:
550 561 if lookup:
551 562 # do a full compare of any files that might have changed
552 563 mf2 = mfmatches(self.dirstate.parents()[0])
553 564 for f in lookup:
554 565 if fcmp(f, mf2):
555 566 modified.append(f)
556 567 elif wlock is not None:
557 568 self.dirstate.update([f], "n")
558 569 else:
559 570 # we are comparing working dir against non-parent
560 571 # generate a pseudo-manifest for the working dir
561 572 mf2 = mfmatches(self.dirstate.parents()[0])
562 573 for f in lookup + modified + added:
563 574 mf2[f] = ""
564 575 for f in removed:
565 576 if f in mf2:
566 577 del mf2[f]
567 578 else:
568 579 # we are comparing two revisions
569 580 deleted, unknown, ignored = [], [], []
570 581 mf2 = mfmatches(node2)
571 582
572 583 if node1:
573 584 # flush lists from dirstate before comparing manifests
574 585 modified, added = [], []
575 586
576 587 for fn in mf2:
577 588 if mf1.has_key(fn):
578 589 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
579 590 modified.append(fn)
580 591 del mf1[fn]
581 592 else:
582 593 added.append(fn)
583 594
584 595 removed = mf1.keys()
585 596
586 597 # sort and return results:
587 598 for l in modified, added, removed, deleted, unknown, ignored:
588 599 l.sort()
589 600 if show_ignored is None:
590 601 return (modified, added, removed, deleted, unknown)
591 602 else:
592 603 return (modified, added, removed, deleted, unknown, ignored)
593 604
594 605 def add(self, list, wlock=None):
595 606 if not wlock:
596 607 wlock = self.wlock()
597 608 for f in list:
598 609 p = self.wjoin(f)
599 610 if not os.path.exists(p):
600 611 self.ui.warn(_("%s does not exist!\n") % f)
601 612 elif not os.path.isfile(p):
602 613 self.ui.warn(_("%s not added: only files supported currently\n")
603 614 % f)
604 615 elif self.dirstate.state(f) in 'an':
605 616 self.ui.warn(_("%s already tracked!\n") % f)
606 617 else:
607 618 self.dirstate.update([f], "a")
608 619
609 620 def forget(self, list, wlock=None):
610 621 if not wlock:
611 622 wlock = self.wlock()
612 623 for f in list:
613 624 if self.dirstate.state(f) not in 'ai':
614 625 self.ui.warn(_("%s not added!\n") % f)
615 626 else:
616 627 self.dirstate.forget([f])
617 628
618 629 def remove(self, list, unlink=False, wlock=None):
619 630 if unlink:
620 631 for f in list:
621 632 try:
622 633 util.unlink(self.wjoin(f))
623 634 except OSError, inst:
624 635 if inst.errno != errno.ENOENT:
625 636 raise
626 637 if not wlock:
627 638 wlock = self.wlock()
628 639 for f in list:
629 640 p = self.wjoin(f)
630 641 if os.path.exists(p):
631 642 self.ui.warn(_("%s still exists!\n") % f)
632 643 elif self.dirstate.state(f) == 'a':
633 644 self.dirstate.forget([f])
634 645 elif f not in self.dirstate:
635 646 self.ui.warn(_("%s not tracked!\n") % f)
636 647 else:
637 648 self.dirstate.update([f], "r")
638 649
639 650 def undelete(self, list, wlock=None):
640 651 p = self.dirstate.parents()[0]
641 652 mn = self.changelog.read(p)[0]
642 653 mf = self.manifest.readflags(mn)
643 654 m = self.manifest.read(mn)
644 655 if not wlock:
645 656 wlock = self.wlock()
646 657 for f in list:
647 658 if self.dirstate.state(f) not in "r":
648 659                 self.ui.warn(_("%s not removed!\n") % f)
649 660 else:
650 661 t = self.file(f).read(m[f])
651 662 self.wwrite(f, t)
652 663 util.set_exec(self.wjoin(f), mf[f])
653 664 self.dirstate.update([f], "n")
654 665
655 666 def copy(self, source, dest, wlock=None):
656 667 p = self.wjoin(dest)
657 668 if not os.path.exists(p):
658 669 self.ui.warn(_("%s does not exist!\n") % dest)
659 670 elif not os.path.isfile(p):
660 671 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
661 672 else:
662 673 if not wlock:
663 674 wlock = self.wlock()
664 675 if self.dirstate.state(dest) == '?':
665 676 self.dirstate.update([dest], "a")
666 677 self.dirstate.copy(source, dest)
667 678
668 679 def heads(self, start=None):
669 680 heads = self.changelog.heads(start)
670 681 # sort the output in rev descending order
671 682 heads = [(-self.changelog.rev(h), h) for h in heads]
672 683 heads.sort()
673 684 return [n for (r, n) in heads]
674 685
675 686 # branchlookup returns a dict giving a list of branches for
676 687 # each head. A branch is defined as the tag of a node or
677 688 # the branch of the node's parents. If a node has multiple
678 689 # branch tags, tags are eliminated if they are visible from other
679 690 # branch tags.
680 691 #
681 692 # So, for this graph: a->b->c->d->e
682 693 # \ /
683 694 # aa -----/
684 695 # a has tag 2.6.12
685 696 # d has tag 2.6.13
686 697 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
687 698 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
688 699 # from the list.
689 700 #
690 701 # It is possible that more than one head will have the same branch tag.
691 702 # callers need to check the result for multiple heads under the same
692 703 # branch tag if that is a problem for them (ie checkout of a specific
693 704 # branch).
694 705 #
695 706 # passing in a specific branch will limit the depth of the search
696 707 # through the parents. It won't limit the branches returned in the
697 708 # result though.
698 709 def branchlookup(self, heads=None, branch=None):
699 710 if not heads:
700 711 heads = self.heads()
701 712 headt = [ h for h in heads ]
702 713 chlog = self.changelog
703 714 branches = {}
704 715 merges = []
705 716 seenmerge = {}
706 717
707 718 # traverse the tree once for each head, recording in the branches
708 719 # dict which tags are visible from this head. The branches
709 720 # dict also records which tags are visible from each tag
710 721 # while we traverse.
711 722 while headt or merges:
712 723 if merges:
713 724 n, found = merges.pop()
714 725 visit = [n]
715 726 else:
716 727 h = headt.pop()
717 728 visit = [h]
718 729 found = [h]
719 730 seen = {}
720 731 while visit:
721 732 n = visit.pop()
722 733 if n in seen:
723 734 continue
724 735 pp = chlog.parents(n)
725 736 tags = self.nodetags(n)
726 737 if tags:
727 738 for x in tags:
728 739 if x == 'tip':
729 740 continue
730 741 for f in found:
731 742 branches.setdefault(f, {})[n] = 1
732 743 branches.setdefault(n, {})[n] = 1
733 744 break
734 745 if n not in found:
735 746 found.append(n)
736 747 if branch in tags:
737 748 continue
738 749 seen[n] = 1
739 750 if pp[1] != nullid and n not in seenmerge:
740 751 merges.append((pp[1], [x for x in found]))
741 752 seenmerge[n] = 1
742 753 if pp[0] != nullid:
743 754 visit.append(pp[0])
744 755 # traverse the branches dict, eliminating branch tags from each
745 756 # head that are visible from another branch tag for that head.
746 757 out = {}
747 758 viscache = {}
748 759 for h in heads:
749 760 def visible(node):
750 761 if node in viscache:
751 762 return viscache[node]
752 763 ret = {}
753 764 visit = [node]
754 765 while visit:
755 766 x = visit.pop()
756 767 if x in viscache:
757 768 ret.update(viscache[x])
758 769 elif x not in ret:
759 770 ret[x] = 1
760 771 if x in branches:
761 772 visit[len(visit):] = branches[x].keys()
762 773 viscache[node] = ret
763 774 return ret
764 775 if h not in branches:
765 776 continue
766 777 # O(n^2), but somewhat limited. This only searches the
767 778 # tags visible from a specific head, not all the tags in the
768 779 # whole repo.
769 780 for b in branches[h]:
770 781 vis = False
771 782 for bb in branches[h].keys():
772 783 if b != bb:
773 784 if b in visible(bb):
774 785 vis = True
775 786 break
776 787 if not vis:
777 788 l = out.setdefault(h, [])
778 789 l[len(l):] = self.nodetags(b)
779 790 return out
780 791
781 792 def branches(self, nodes):
782 793 if not nodes:
783 794 nodes = [self.changelog.tip()]
784 795 b = []
785 796 for n in nodes:
786 797 t = n
787 798 while n:
788 799 p = self.changelog.parents(n)
789 800 if p[1] != nullid or p[0] == nullid:
790 801 b.append((t, n, p[0], p[1]))
791 802 break
792 803 n = p[0]
793 804 return b
794 805
795 806 def between(self, pairs):
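        # Walks first parents from each top toward its bottom, recording
        # the nodes at distances 1, 2, 4, 8, ... below top -- a sparse
        # sample that findincoming's binary search can narrow. E.g.
        # (illustrative) for a linear range 12 changesets deep it records
        # the nodes 1, 2, 4 and 8 steps below top.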
796 807 r = []
797 808
798 809 for top, bottom in pairs:
799 810 n, l, i = top, [], 0
800 811 f = 1
801 812
802 813 while n != bottom:
803 814 p = self.changelog.parents(n)[0]
804 815 if i == f:
805 816 l.append(n)
806 817 f = f * 2
807 818 n = p
808 819 i += 1
809 820
810 821 r.append(l)
811 822
812 823 return r
813 824
814 825 def findincoming(self, remote, base=None, heads=None, force=False):
815 826 m = self.changelog.nodemap
816 827 search = []
817 828 fetch = {}
818 829 seen = {}
819 830 seenbranch = {}
820 831 if base == None:
821 832 base = {}
822 833
823 834 # assume we're closer to the tip than the root
824 835 # and start by examining the heads
825 836 self.ui.status(_("searching for changes\n"))
826 837
827 838 if not heads:
828 839 heads = remote.heads()
829 840
830 841 unknown = []
831 842 for h in heads:
832 843 if h not in m:
833 844 unknown.append(h)
834 845 else:
835 846 base[h] = 1
836 847
837 848 if not unknown:
838 849 return []
839 850
840 851 rep = {}
841 852 reqcnt = 0
842 853
843 854 # search through remote branches
844 855 # a 'branch' here is a linear segment of history, with four parts:
845 856 # head, root, first parent, second parent
846 857 # (a branch always has two parents (or none) by definition)
847 858 unknown = remote.branches(unknown)
848 859 while unknown:
849 860 r = []
850 861 while unknown:
851 862 n = unknown.pop(0)
852 863 if n[0] in seen:
853 864 continue
854 865
855 866 self.ui.debug(_("examining %s:%s\n")
856 867 % (short(n[0]), short(n[1])))
857 868 if n[0] == nullid:
858 869 break
859 870 if n in seenbranch:
860 871 self.ui.debug(_("branch already found\n"))
861 872 continue
862 873 if n[1] and n[1] in m: # do we know the base?
863 874 self.ui.debug(_("found incomplete branch %s:%s\n")
864 875 % (short(n[0]), short(n[1])))
865 876 search.append(n) # schedule branch range for scanning
866 877 seenbranch[n] = 1
867 878 else:
868 879 if n[1] not in seen and n[1] not in fetch:
869 880 if n[2] in m and n[3] in m:
870 881 self.ui.debug(_("found new changeset %s\n") %
871 882 short(n[1]))
872 883 fetch[n[1]] = 1 # earliest unknown
873 884 base[n[2]] = 1 # latest known
874 885 continue
875 886
876 887 for a in n[2:4]:
877 888 if a not in rep:
878 889 r.append(a)
879 890 rep[a] = 1
880 891
881 892 seen[n[0]] = 1
882 893
883 894 if r:
884 895 reqcnt += 1
885 896 self.ui.debug(_("request %d: %s\n") %
886 897 (reqcnt, " ".join(map(short, r))))
887 898 for p in range(0, len(r), 10):
888 899 for b in remote.branches(r[p:p+10]):
889 900 self.ui.debug(_("received %s:%s\n") %
890 901 (short(b[0]), short(b[1])))
891 902 if b[0] in m:
892 903 self.ui.debug(_("found base node %s\n")
893 904 % short(b[0]))
894 905 base[b[0]] = 1
895 906 elif b[0] not in seen:
896 907 unknown.append(b)
897 908
898 909 # do binary search on the branches we found
899 910 while search:
900 911 n = search.pop(0)
901 912 reqcnt += 1
902 913 l = remote.between([(n[0], n[1])])[0]
903 914 l.append(n[1])
904 915 p = n[0]
905 916 f = 1
906 917 for i in l:
907 918 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
908 919 if i in m:
909 920 if f <= 2:
910 921 self.ui.debug(_("found new branch changeset %s\n") %
911 922 short(p))
912 923 fetch[p] = 1
913 924 base[i] = 1
914 925 else:
915 926 self.ui.debug(_("narrowed branch search to %s:%s\n")
916 927 % (short(p), short(i)))
917 928 search.append((p, i))
918 929 break
919 930 p, f = i, f * 2
920 931
921 932 # sanity check our fetch list
922 933 for f in fetch.keys():
923 934 if f in m:
923 934                 raise repo.RepoError(_("already have changeset ") + short(f))
925 936
926 937 if base.keys() == [nullid]:
927 938 if force:
928 939 self.ui.warn(_("warning: repository is unrelated\n"))
929 940 else:
930 941 raise util.Abort(_("repository is unrelated"))
931 942
932 943 self.ui.note(_("found new changesets starting at ") +
933 944 " ".join([short(f) for f in fetch]) + "\n")
934 945
935 946 self.ui.debug(_("%d total queries\n") % reqcnt)
936 947
937 948 return fetch.keys()
938 949
939 950 def findoutgoing(self, remote, base=None, heads=None, force=False):
940 951 """Return list of nodes that are roots of subsets not in remote
941 952
942 953 If base dict is specified, assume that these nodes and their parents
943 954 exist on the remote side.
944 955 If a list of heads is specified, return only nodes which are heads
945 956 or ancestors of these heads, and return a second element which
946 957 contains all remote heads which get new children.
947 958 """
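        # (illustrative) push() consumes this as:
        #   update, updated_heads = self.findoutgoing(remote, base,
        #                                             remote_heads)
        # falling back to the bare node list when no heads are given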
948 959 if base == None:
949 960 base = {}
950 961 self.findincoming(remote, base, heads, force=force)
951 962
952 963 self.ui.debug(_("common changesets up to ")
953 964 + " ".join(map(short, base.keys())) + "\n")
954 965
955 966 remain = dict.fromkeys(self.changelog.nodemap)
956 967
957 968 # prune everything remote has from the tree
958 969 del remain[nullid]
959 970 remove = base.keys()
960 971 while remove:
961 972 n = remove.pop(0)
962 973 if n in remain:
963 974 del remain[n]
964 975 for p in self.changelog.parents(n):
965 976 remove.append(p)
966 977
967 978 # find every node whose parents have been pruned
968 979 subset = []
969 980 # find every remote head that will get new children
970 981 updated_heads = {}
971 982 for n in remain:
972 983 p1, p2 = self.changelog.parents(n)
973 984 if p1 not in remain and p2 not in remain:
974 985 subset.append(n)
975 986 if heads:
976 987 if p1 in heads:
977 988 updated_heads[p1] = True
978 989 if p2 in heads:
979 990 updated_heads[p2] = True
980 991
981 992 # this is the set of all roots we have to push
982 993 if heads:
983 994 return subset, updated_heads.keys()
984 995 else:
985 996 return subset
986 997
987 998 def pull(self, remote, heads=None, force=False):
988 999 l = self.lock()
989 1000
990 1001 # if we have an empty repo, fetch everything
991 1002 if self.changelog.tip() == nullid:
992 1003 self.ui.status(_("requesting all changes\n"))
993 1004 fetch = [nullid]
994 1005 else:
995 1006 fetch = self.findincoming(remote, force=force)
996 1007
997 1008 if not fetch:
998 1009 self.ui.status(_("no changes found\n"))
999 1010 return 0
1000 1011
1001 1012 if heads is None:
1002 1013 cg = remote.changegroup(fetch, 'pull')
1003 1014 else:
1004 1015 cg = remote.changegroupsubset(fetch, heads, 'pull')
1005 1016 return self.addchangegroup(cg)
1006 1017
1007 1018 def push(self, remote, force=False, revs=None):
1008 1019 lock = remote.lock()
1009 1020
1010 1021 base = {}
1011 1022 remote_heads = remote.heads()
1012 1023 inc = self.findincoming(remote, base, remote_heads, force=force)
1013 1024 if not force and inc:
1014 1025 self.ui.warn(_("abort: unsynced remote changes!\n"))
1015 1026 self.ui.status(_("(did you forget to sync?"
1016 1027 " use push -f to force)\n"))
1017 1028 return 1
1018 1029
1019 1030 update, updated_heads = self.findoutgoing(remote, base, remote_heads)
1020 1031 if revs is not None:
1021 1032 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1022 1033 else:
1023 1034 bases, heads = update, self.changelog.heads()
1024 1035
1025 1036 if not bases:
1026 1037 self.ui.status(_("no changes found\n"))
1027 1038 return 1
1028 1039 elif not force:
1029 1040 if revs is not None:
1030 1041 updated_heads = {}
1031 1042 for base in msng_cl:
1032 1043 for parent in self.changelog.parents(base):
1033 1044 if parent in remote_heads:
1034 1045 updated_heads[parent] = True
1035 1046 updated_heads = updated_heads.keys()
1036 1047 if len(updated_heads) < len(heads):
1037 1048 self.ui.warn(_("abort: push creates new remote branches!\n"))
1038 1049 self.ui.status(_("(did you forget to merge?"
1039 1050 " use push -f to force)\n"))
1040 1051 return 1
1041 1052
1042 1053 if revs is None:
1043 1054 cg = self.changegroup(update, 'push')
1044 1055 else:
1045 1056 cg = self.changegroupsubset(update, revs, 'push')
1046 1057 return remote.addchangegroup(cg)
1047 1058
1048 1059 def changegroupsubset(self, bases, heads, source):
1049 1060 """This function generates a changegroup consisting of all the nodes
1050 1061         that are descendants of any of the bases, and ancestors of any of
1051 1062 the heads.
1052 1063
1053 1064 It is fairly complex as determining which filenodes and which
1054 1065 manifest nodes need to be included for the changeset to be complete
1055 1066 is non-trivial.
1056 1067
1057 1068 Another wrinkle is doing the reverse, figuring out which changeset in
1058 1069 the changegroup a particular filenode or manifestnode belongs to."""
1059 1070
1060 1071 self.hook('preoutgoing', throw=True, source=source)
1061 1072
1062 1073 # Set up some initial variables
1063 1074 # Make it easy to refer to self.changelog
1064 1075 cl = self.changelog
1065 1076 # msng is short for missing - compute the list of changesets in this
1066 1077 # changegroup.
1067 1078 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1068 1079 # Some bases may turn out to be superfluous, and some heads may be
1069 1080 # too. nodesbetween will return the minimal set of bases and heads
1070 1081 # necessary to re-create the changegroup.
1071 1082
1072 1083 # Known heads are the list of heads that it is assumed the recipient
1073 1084 # of this changegroup will know about.
1074 1085 knownheads = {}
1075 1086 # We assume that all parents of bases are known heads.
1076 1087 for n in bases:
1077 1088 for p in cl.parents(n):
1078 1089 if p != nullid:
1079 1090 knownheads[p] = 1
1080 1091 knownheads = knownheads.keys()
1081 1092 if knownheads:
1082 1093 # Now that we know what heads are known, we can compute which
1083 1094 # changesets are known. The recipient must know about all
1084 1095 # changesets required to reach the known heads from the null
1085 1096 # changeset.
1086 1097 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1087 1098 junk = None
1088 1099 # Transform the list into an ersatz set.
1089 1100 has_cl_set = dict.fromkeys(has_cl_set)
1090 1101 else:
1091 1102 # If there were no known heads, the recipient cannot be assumed to
1092 1103 # know about any changesets.
1093 1104 has_cl_set = {}
1094 1105
1095 1106 # Make it easy to refer to self.manifest
1096 1107 mnfst = self.manifest
1097 1108 # We don't know which manifests are missing yet
1098 1109 msng_mnfst_set = {}
1099 1110 # Nor do we know which filenodes are missing.
1100 1111 msng_filenode_set = {}
1101 1112
1102 1113 junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
1103 1114 junk = None
1104 1115
1105 1116 # A changeset always belongs to itself, so the changenode lookup
1106 1117 # function for a changenode is identity.
1107 1118 def identity(x):
1108 1119 return x
1109 1120
1110 1121 # A function generating function. Sets up an environment for the
1111 1122 # inner function.
1112 1123 def cmp_by_rev_func(revlog):
1113 1124 # Compare two nodes by their revision number in the environment's
1114 1125 # revision history. Since the revision number both represents the
1115 1126 # most efficient order to read the nodes in, and represents a
1116 1127 # topological sorting of the nodes, this function is often useful.
1117 1128 def cmp_by_rev(a, b):
1118 1129 return cmp(revlog.rev(a), revlog.rev(b))
1119 1130 return cmp_by_rev
1120 1131
1121 1132 # If we determine that a particular file or manifest node must be a
1122 1133 # node that the recipient of the changegroup will already have, we can
1123 1134 # also assume the recipient will have all the parents. This function
1124 1135 # prunes them from the set of missing nodes.
1125 1136 def prune_parents(revlog, hasset, msngset):
1126 1137 haslst = hasset.keys()
1127 1138 haslst.sort(cmp_by_rev_func(revlog))
1128 1139 for node in haslst:
1129 1140 parentlst = [p for p in revlog.parents(node) if p != nullid]
1130 1141 while parentlst:
1131 1142 n = parentlst.pop()
1132 1143 if n not in hasset:
1133 1144 hasset[n] = 1
1134 1145 p = [p for p in revlog.parents(n) if p != nullid]
1135 1146 parentlst.extend(p)
1136 1147 for n in hasset:
1137 1148 msngset.pop(n, None)
1138 1149
1139 1150 # This is a function generating function used to set up an environment
1140 1151 # for the inner function to execute in.
1141 1152 def manifest_and_file_collector(changedfileset):
1142 1153 # This is an information gathering function that gathers
1143 1154 # information from each changeset node that goes out as part of
1144 1155 # the changegroup. The information gathered is a list of which
1145 1156 # manifest nodes are potentially required (the recipient may
1146 1157 # already have them) and total list of all files which were
1147 1158 # changed in any changeset in the changegroup.
1148 1159 #
1149 1160 # We also remember the first changenode we saw any manifest
1150 1161 # referenced by so we can later determine which changenode 'owns'
1151 1162 # the manifest.
1152 1163 def collect_manifests_and_files(clnode):
1153 1164 c = cl.read(clnode)
1154 1165 for f in c[3]:
1155 1166 # This is to make sure we only have one instance of each
1156 1167 # filename string for each filename.
1157 1168 changedfileset.setdefault(f, f)
1158 1169 msng_mnfst_set.setdefault(c[0], clnode)
1159 1170 return collect_manifests_and_files
1160 1171
1161 1172 # Figure out which manifest nodes (of the ones we think might be part
1162 1173 # of the changegroup) the recipient must know about and remove them
1163 1174 # from the changegroup.
1164 1175 def prune_manifests():
1165 1176 has_mnfst_set = {}
1166 1177 for n in msng_mnfst_set:
1167 1178 # If a 'missing' manifest thinks it belongs to a changenode
1168 1179 # the recipient is assumed to have, obviously the recipient
1169 1180 # must have that manifest.
1170 1181 linknode = cl.node(mnfst.linkrev(n))
1171 1182 if linknode in has_cl_set:
1172 1183 has_mnfst_set[n] = 1
1173 1184 prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1174 1185
1175 1186 # Use the information collected in collect_manifests_and_files to say
1176 1187 # which changenode any manifestnode belongs to.
1177 1188 def lookup_manifest_link(mnfstnode):
1178 1189 return msng_mnfst_set[mnfstnode]
1179 1190
1180 1191 # A function generating function that sets up the initial environment
1181 1192         # for the inner function.
1182 1193 def filenode_collector(changedfiles):
1183 1194 next_rev = [0]
1184 1195 # This gathers information from each manifestnode included in the
1185 1196 # changegroup about which filenodes the manifest node references
1186 1197 # so we can include those in the changegroup too.
1187 1198 #
1188 1199 # It also remembers which changenode each filenode belongs to. It
1189 1200             # does this by assuming that a filenode belongs to the changenode
1190 1201 # the first manifest that references it belongs to.
1191 1202 def collect_msng_filenodes(mnfstnode):
1192 1203 r = mnfst.rev(mnfstnode)
1193 1204 if r == next_rev[0]:
1194 1205 # If the last rev we looked at was the one just previous,
1195 1206 # we only need to see a diff.
1196 1207 delta = mdiff.patchtext(mnfst.delta(mnfstnode))
1197 1208 # For each line in the delta
1198 1209 for dline in delta.splitlines():
1199 1210 # get the filename and filenode for that line
1200 1211 f, fnode = dline.split('\0')
1201 1212 fnode = bin(fnode[:40])
1202 1213 f = changedfiles.get(f, None)
1203 1214 # And if the file is in the list of files we care
1204 1215 # about.
1205 1216 if f is not None:
1206 1217 # Get the changenode this manifest belongs to
1207 1218 clnode = msng_mnfst_set[mnfstnode]
1208 1219 # Create the set of filenodes for the file if
1209 1220 # there isn't one already.
1210 1221 ndset = msng_filenode_set.setdefault(f, {})
1211 1222 # And set the filenode's changelog node to the
1212 1223 # manifest's if it hasn't been set already.
1213 1224 ndset.setdefault(fnode, clnode)
1214 1225 else:
1215 1226 # Otherwise we need a full manifest.
1216 1227 m = mnfst.read(mnfstnode)
1217 1228                     # For every file we care about.
1218 1229 for f in changedfiles:
1219 1230 fnode = m.get(f, None)
1220 1231 # If it's in the manifest
1221 1232 if fnode is not None:
1222 1233 # See comments above.
1223 1234 clnode = msng_mnfst_set[mnfstnode]
1224 1235 ndset = msng_filenode_set.setdefault(f, {})
1225 1236 ndset.setdefault(fnode, clnode)
1226 1237 # Remember the revision we hope to see next.
1227 1238 next_rev[0] = r + 1
1228 1239 return collect_msng_filenodes
1229 1240
1230 1241         # We have a list of filenodes we think we need for a file; let's remove
1231 1242         # all those we know the recipient must have.
1232 1243 def prune_filenodes(f, filerevlog):
1233 1244 msngset = msng_filenode_set[f]
1234 1245 hasset = {}
1235 1246 # If a 'missing' filenode thinks it belongs to a changenode we
1236 1247 # assume the recipient must have, then the recipient must have
1237 1248 # that filenode.
1238 1249 for n in msngset:
1239 1250 clnode = cl.node(filerevlog.linkrev(n))
1240 1251 if clnode in has_cl_set:
1241 1252 hasset[n] = 1
1242 1253 prune_parents(filerevlog, hasset, msngset)
1243 1254
1244 1255         # A function generating function that sets up a context for the
1245 1256 # inner function.
1246 1257 def lookup_filenode_link_func(fname):
1247 1258 msngset = msng_filenode_set[fname]
1248 1259 # Lookup the changenode the filenode belongs to.
1249 1260 def lookup_filenode_link(fnode):
1250 1261 return msngset[fnode]
1251 1262 return lookup_filenode_link
1252 1263
1253 1264         # Now that we have all these utility functions to help out and
1254 1265 # logically divide up the task, generate the group.
1255 1266 def gengroup():
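            # Stream layout produced below, framed with changegroup's
            # chunk helpers: changelog chunks, then manifest chunks, then
            # for each changed file a chunk holding the file name followed
            # by its filenode chunks, and finally an empty chunk
            # (closechunk) to signal the end of the stream.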
1256 1267 # The set of changed files starts empty.
1257 1268 changedfiles = {}
1258 1269 # Create a changenode group generator that will call our functions
1259 1270 # back to lookup the owning changenode and collect information.
1260 1271 group = cl.group(msng_cl_lst, identity,
1261 1272 manifest_and_file_collector(changedfiles))
1262 1273 for chnk in group:
1263 1274 yield chnk
1264 1275
1265 1276 # The list of manifests has been collected by the generator
1266 1277 # calling our functions back.
1267 1278 prune_manifests()
1268 1279 msng_mnfst_lst = msng_mnfst_set.keys()
1269 1280 # Sort the manifestnodes by revision number.
1270 1281 msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
1271 1282 # Create a generator for the manifestnodes that calls our lookup
1272 1283 # and data collection functions back.
1273 1284 group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
1274 1285 filenode_collector(changedfiles))
1275 1286 for chnk in group:
1276 1287 yield chnk
1277 1288
1278 1289 # These are no longer needed, dereference and toss the memory for
1279 1290 # them.
1280 1291 msng_mnfst_lst = None
1281 1292 msng_mnfst_set.clear()
1282 1293
1283 1294 changedfiles = changedfiles.keys()
1284 1295 changedfiles.sort()
1285 1296 # Go through all our files in order sorted by name.
1286 1297 for fname in changedfiles:
1287 1298 filerevlog = self.file(fname)
1288 1299 # Toss out the filenodes that the recipient isn't really
1289 1300 # missing.
1290 1301 if msng_filenode_set.has_key(fname):
1291 1302 prune_filenodes(fname, filerevlog)
1292 1303 msng_filenode_lst = msng_filenode_set[fname].keys()
1293 1304 else:
1294 1305 msng_filenode_lst = []
1295 1306 # If any filenodes are left, generate the group for them,
1296 1307 # otherwise don't bother.
1297 1308 if len(msng_filenode_lst) > 0:
1298 1309 yield changegroup.genchunk(fname)
1299 1310 # Sort the filenodes by their revision #
1300 1311 msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
1301 1312 # Create a group generator and only pass in a changenode
1302 1313 # lookup function as we need to collect no information
1303 1314 # from filenodes.
1304 1315 group = filerevlog.group(msng_filenode_lst,
1305 1316 lookup_filenode_link_func(fname))
1306 1317 for chnk in group:
1307 1318 yield chnk
1308 1319 if msng_filenode_set.has_key(fname):
1309 1320 # Don't need this anymore, toss it to free memory.
1310 1321 del msng_filenode_set[fname]
1311 1322 # Signal that no more groups are left.
1312 1323 yield changegroup.closechunk()
1313 1324
1314 1325 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1315 1326
1316 1327 return util.chunkbuffer(gengroup())
1317 1328
1318 1329 def changegroup(self, basenodes, source):
1319 1330 """Generate a changegroup of all nodes that we have that a recipient
1320 1331 doesn't.
1321 1332
1322 1333 This is much easier than the previous function as we can assume that
1323 1334 the recipient has any changenode we aren't sending them."""
1324 1335
1325 1336 self.hook('preoutgoing', throw=True, source=source)
1326 1337
1327 1338 cl = self.changelog
1328 1339 nodes = cl.nodesbetween(basenodes, None)[0]
1329 1340 revset = dict.fromkeys([cl.rev(n) for n in nodes])
1330 1341
1331 1342 def identity(x):
1332 1343 return x
1333 1344
1334 1345 def gennodelst(revlog):
1335 1346 for r in xrange(0, revlog.count()):
1336 1347 n = revlog.node(r)
1337 1348 if revlog.linkrev(n) in revset:
1338 1349 yield n
1339 1350
1340 1351 def changed_file_collector(changedfileset):
1341 1352 def collect_changed_files(clnode):
1342 1353 c = cl.read(clnode)
1343 1354 for fname in c[3]:
1344 1355 changedfileset[fname] = 1
1345 1356 return collect_changed_files
1346 1357
1347 1358 def lookuprevlink_func(revlog):
1348 1359 def lookuprevlink(n):
1349 1360 return cl.node(revlog.linkrev(n))
1350 1361 return lookuprevlink
1351 1362
1352 1363 def gengroup():
1353 1364 # construct a list of all changed files
1354 1365 changedfiles = {}
1355 1366
1356 1367 for chnk in cl.group(nodes, identity,
1357 1368 changed_file_collector(changedfiles)):
1358 1369 yield chnk
1359 1370 changedfiles = changedfiles.keys()
1360 1371 changedfiles.sort()
1361 1372
1362 1373 mnfst = self.manifest
1363 1374 nodeiter = gennodelst(mnfst)
1364 1375 for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
1365 1376 yield chnk
1366 1377
1367 1378 for fname in changedfiles:
1368 1379 filerevlog = self.file(fname)
1369 1380 nodeiter = gennodelst(filerevlog)
1370 1381 nodeiter = list(nodeiter)
1371 1382 if nodeiter:
1372 1383 yield changegroup.genchunk(fname)
1373 1384 lookup = lookuprevlink_func(filerevlog)
1374 1385 for chnk in filerevlog.group(nodeiter, lookup):
1375 1386 yield chnk
1376 1387
1377 1388 yield changegroup.closechunk()
1378 1389 self.hook('outgoing', node=hex(nodes[0]), source=source)
1379 1390
1380 1391 return util.chunkbuffer(gengroup())
1381 1392
1382 1393 def addchangegroup(self, source):
1383 1394 """add changegroup to repo.
1384 1395 returns number of heads modified or added + 1."""
1385 1396
1386 1397 def csmap(x):
1387 1398 self.ui.debug(_("add changeset %s\n") % short(x))
1388 1399 return cl.count()
1389 1400
1390 1401 def revmap(x):
1391 1402 return cl.rev(x)
1392 1403
1393 1404 if not source:
1394 1405 return 0
1395 1406
1396 1407 self.hook('prechangegroup', throw=True)
1397 1408
1398 1409 changesets = files = revisions = 0
1399 1410
1400 1411 tr = self.transaction()
1401 1412
1402 1413 # write changelog and manifest data to temp files so
1403 1414 # concurrent readers will not see inconsistent view
1404 1415 cl = appendfile.appendchangelog(self.opener)
1405 1416
1406 1417 oldheads = len(cl.heads())
1407 1418
1408 1419 # pull off the changeset group
1409 1420 self.ui.status(_("adding changesets\n"))
1410 1421 co = cl.tip()
1411 1422 chunkiter = changegroup.chunkiter(source)
1412 1423 cn = cl.addgroup(chunkiter, csmap, tr, 1) # unique
1413 1424 cnr, cor = map(cl.rev, (cn, co))
1414 1425 if cn == nullid:
1415 1426 cnr = cor
1416 1427 changesets = cnr - cor
1417 1428
1418 1429 mf = appendfile.appendmanifest(self.opener)
1419 1430
1420 1431 # pull off the manifest group
1421 1432 self.ui.status(_("adding manifests\n"))
1422 1433 mm = mf.tip()
1423 1434 chunkiter = changegroup.chunkiter(source)
1424 1435 mo = mf.addgroup(chunkiter, revmap, tr)
1425 1436
1426 1437 # process the files
1427 1438 self.ui.status(_("adding file changes\n"))
1428 1439 while 1:
1429 1440 f = changegroup.getchunk(source)
1430 1441 if not f:
1431 1442 break
1432 1443 self.ui.debug(_("adding %s revisions\n") % f)
1433 1444 fl = self.file(f)
1434 1445 o = fl.count()
1435 1446 chunkiter = changegroup.chunkiter(source)
1436 1447 n = fl.addgroup(chunkiter, revmap, tr)
1437 1448 revisions += fl.count() - o
1438 1449 files += 1
1439 1450
1440 1451 # write order here is important so concurrent readers will see
1441 1452 # consistent view of repo
1442 1453 mf.writedata()
1443 1454 cl.writedata()
1444 1455
1445 1456 # make changelog and manifest see real files again
1446 1457 self.changelog = changelog.changelog(self.opener)
1447 1458 self.manifest = manifest.manifest(self.opener)
1448 1459
1449 1460 newheads = len(self.changelog.heads())
1450 1461 heads = ""
1451 1462 if oldheads and newheads > oldheads:
1452 1463 heads = _(" (+%d heads)") % (newheads - oldheads)
1453 1464
1454 1465 self.ui.status(_("added %d changesets"
1455 1466 " with %d changes to %d files%s\n")
1456 1467 % (changesets, revisions, files, heads))
1457 1468
1458 1469 self.hook('pretxnchangegroup', throw=True,
1459 1470 node=hex(self.changelog.node(cor+1)))
1460 1471
1461 1472 tr.close()
1462 1473
1463 1474 if changesets > 0:
1464 1475 self.hook("changegroup", node=hex(self.changelog.node(cor+1)))
1465 1476
1466 1477 for i in range(cor + 1, cnr + 1):
1467 1478 self.hook("incoming", node=hex(self.changelog.node(i)))
1468 1479
1469 1480 return newheads - oldheads + 1
1470 1481
1471 1482 def update(self, node, allow=False, force=False, choose=None,
1472 1483 moddirstate=True, forcemerge=False, wlock=None):
1473 1484 pl = self.dirstate.parents()
1474 1485 if not force and pl[1] != nullid:
1475 1486 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1476 1487 return 1
1477 1488
1478 1489 err = False
1479 1490
1480 1491 p1, p2 = pl[0], node
1481 1492 pa = self.changelog.ancestor(p1, p2)
1482 1493 m1n = self.changelog.read(p1)[0]
1483 1494 m2n = self.changelog.read(p2)[0]
1484 1495 man = self.manifest.ancestor(m1n, m2n)
1485 1496 m1 = self.manifest.read(m1n)
1486 1497 mf1 = self.manifest.readflags(m1n)
1487 1498 m2 = self.manifest.read(m2n).copy()
1488 1499 mf2 = self.manifest.readflags(m2n)
1489 1500 ma = self.manifest.read(man)
1490 1501 mfa = self.manifest.readflags(man)
1491 1502
1492 1503 modified, added, removed, deleted, unknown = self.changes()
1493 1504
1494 1505 # is this a jump, or a merge? i.e. is there a linear path
1495 1506 # from p1 to p2?
1496 1507 linear_path = (pa == p1 or pa == p2)
1497 1508
1498 1509 if allow and linear_path:
1499 1510 raise util.Abort(_("there is nothing to merge, "
1500 1511 "just use 'hg update'"))
1501 1512 if allow and not forcemerge:
1502 1513 if modified or added or removed:
1503 1514 raise util.Abort(_("outstanding uncommitted changes"))
1504 1515 if not forcemerge and not force:
1505 1516 for f in unknown:
1506 1517 if f in m2:
1507 1518 t1 = self.wread(f)
1508 1519 t2 = self.file(f).read(m2[f])
1509 1520 if cmp(t1, t2) != 0:
1510 1521 raise util.Abort(_("'%s' already exists in the working"
1511 1522 " dir and differs from remote") % f)
1512 1523
1513 1524 # resolve the manifest to determine which files
1514 1525 # we care about merging
1515 1526 self.ui.note(_("resolving manifests\n"))
1516 1527 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1517 1528 (force, allow, moddirstate, linear_path))
1518 1529 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1519 1530 (short(man), short(m1n), short(m2n)))
1520 1531
1521 1532 merge = {}
1522 1533 get = {}
1523 1534 remove = []
1524 1535
1525 1536 # construct a working dir manifest
1526 1537 mw = m1.copy()
1527 1538 mfw = mf1.copy()
1528 1539 umap = dict.fromkeys(unknown)
1529 1540
1530 1541 for f in added + modified + unknown:
1531 1542 mw[f] = ""
1532 1543 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1533 1544
1534 1545 if moddirstate and not wlock:
1535 1546 wlock = self.wlock()
1536 1547
1537 1548 for f in deleted + removed:
1538 1549 if f in mw:
1539 1550 del mw[f]
1540 1551
1541 1552 # If we're jumping between revisions (as opposed to merging),
1542 1553 # and if neither the working directory nor the target rev has
1543 1554 # the file, then we need to remove it from the dirstate, to
1544 1555 # prevent the dirstate from listing the file when it is no
1545 1556 # longer in the manifest.
1546 1557 if moddirstate and linear_path and f not in m2:
1547 1558 self.dirstate.forget((f,))
1548 1559
1549 1560 # Compare manifests
1550 1561 for f, n in mw.iteritems():
1551 1562 if choose and not choose(f):
1552 1563 continue
1553 1564 if f in m2:
1554 1565 s = 0
1555 1566
1556 1567 # is the wfile new since m1, and match m2?
1557 1568 if f not in m1:
1558 1569 t1 = self.wread(f)
1559 1570 t2 = self.file(f).read(m2[f])
1560 1571 if cmp(t1, t2) == 0:
1561 1572 n = m2[f]
1562 1573 del t1, t2
1563 1574
1564 1575 # are files different?
1565 1576 if n != m2[f]:
1566 1577 a = ma.get(f, nullid)
1567 1578 # are both different from the ancestor?
1568 1579 if n != a and m2[f] != a:
1569 1580 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1570 1581 # merge executable bits
1571 1582 # "if we changed or they changed, change in merge"
1572 1583 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1573 1584 mode = ((a^b) | (a^c)) ^ a
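                        # quick check of the bit formula (a=ancestor,
                        # b=working, c=remote): whichever side differs
                        # from the ancestor wins; e.g. a=0, b=1, c=0
                        # gives mode = ((0^1)|(0^0))^0 = 1, keeping the
                        # exec bit we set locally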
1574 1585 merge[f] = (m1.get(f, nullid), m2[f], mode)
1575 1586 s = 1
1576 1587 # are we clobbering?
1577 1588 # is remote's version newer?
1578 1589 # or are we going back in time?
1579 1590 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1580 1591 self.ui.debug(_(" remote %s is newer, get\n") % f)
1581 1592 get[f] = m2[f]
1582 1593 s = 1
1583 1594 elif f in umap:
1584 1595 # this unknown file is the same as the checkout
1585 1596 get[f] = m2[f]
1586 1597
1587 1598 if not s and mfw[f] != mf2[f]:
1588 1599 if force:
1589 1600 self.ui.debug(_(" updating permissions for %s\n") % f)
1590 1601 util.set_exec(self.wjoin(f), mf2[f])
1591 1602 else:
1592 1603 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1593 1604 mode = ((a^b) | (a^c)) ^ a
1594 1605 if mode != b:
1595 1606 self.ui.debug(_(" updating permissions for %s\n")
1596 1607 % f)
1597 1608 util.set_exec(self.wjoin(f), mode)
1598 1609 del m2[f]
1599 1610 elif f in ma:
1600 1611 if n != ma[f]:
1601 1612 r = _("d")
1602 1613 if not force and (linear_path or allow):
1603 1614 r = self.ui.prompt(
1604 1615 (_(" local changed %s which remote deleted\n") % f) +
1605 1616 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1606 1617 if r == _("d"):
1607 1618 remove.append(f)
1608 1619 else:
1609 1620 self.ui.debug(_("other deleted %s\n") % f)
1610 1621 remove.append(f) # other deleted it
1611 1622 else:
1612 1623 # file is created on branch or in working directory
1613 1624 if force and f not in umap:
1614 1625 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1615 1626 remove.append(f)
1616 1627 elif n == m1.get(f, nullid): # same as parent
1617 1628 if p2 == pa: # going backwards?
1618 1629 self.ui.debug(_("remote deleted %s\n") % f)
1619 1630 remove.append(f)
1620 1631 else:
1621 1632 self.ui.debug(_("local modified %s, keeping\n") % f)
1622 1633 else:
1623 1634 self.ui.debug(_("working dir created %s, keeping\n") % f)
1624 1635
1625 1636 for f, n in m2.iteritems():
1626 1637 if choose and not choose(f):
1627 1638 continue
1628 1639 if f[0] == "/":
1629 1640 continue
1630 1641 if f in ma and n != ma[f]:
1631 1642 r = _("k")
1632 1643 if not force and (linear_path or allow):
1633 1644 r = self.ui.prompt(
1634 1645 (_("remote changed %s which local deleted\n") % f) +
1635 1646 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1636 1647 if r == _("k"):
1637 1648 get[f] = n
1638 1649 elif f not in ma:
1639 1650 self.ui.debug(_("remote created %s\n") % f)
1640 1651 get[f] = n
1641 1652 else:
1642 1653 if force or p2 == pa: # going backwards?
1643 1654 self.ui.debug(_("local deleted %s, recreating\n") % f)
1644 1655 get[f] = n
1645 1656 else:
1646 1657 self.ui.debug(_("local deleted %s\n") % f)
1647 1658
1648 1659 del mw, m1, m2, ma
1649 1660
1650 1661 if force:
1651 1662 for f in merge:
1652 1663 get[f] = merge[f][1]
1653 1664 merge = {}
1654 1665
1655 1666 if linear_path or force:
1656 1667 # we don't need to do any magic, just jump to the new rev
1657 1668 branch_merge = False
1658 1669 p1, p2 = p2, nullid
1659 1670 else:
1660 1671 if not allow:
1661 1672 self.ui.status(_("this update spans a branch"
1662 1673 " affecting the following files:\n"))
1663 1674 fl = merge.keys() + get.keys()
1664 1675 fl.sort()
1665 1676 for f in fl:
1666 1677 cf = ""
1667 1678 if f in merge:
1668 1679 cf = _(" (resolve)")
1669 1680 self.ui.status(" %s%s\n" % (f, cf))
1670 1681 self.ui.warn(_("aborting update spanning branches!\n"))
1671 1682 self.ui.status(_("(use 'hg merge' to merge across branches"
1672 1683 " or 'hg update -C' to lose changes)\n"))
1673 1684 return 1
1674 1685 branch_merge = True
1675 1686
1676 1687 # get the files we don't need to change
1677 1688 files = get.keys()
1678 1689 files.sort()
1679 1690 for f in files:
1680 1691 if f[0] == "/":
1681 1692 continue
1682 1693 self.ui.note(_("getting %s\n") % f)
1683 1694 t = self.file(f).read(get[f])
1684 1695 self.wwrite(f, t)
1685 1696 util.set_exec(self.wjoin(f), mf2[f])
1686 1697 if moddirstate:
1687 1698 if branch_merge:
1688 1699 self.dirstate.update([f], 'n', st_mtime=-1)
1689 1700 else:
1690 1701 self.dirstate.update([f], 'n')
1691 1702
1692 1703 # merge the tricky bits
1693 1704 failedmerge = []
1694 1705 files = merge.keys()
1695 1706 files.sort()
1696 1707 xp1 = hex(p1)
1697 1708 xp2 = hex(p2)
1698 1709 for f in files:
1699 1710 self.ui.status(_("merging %s\n") % f)
1700 1711 my, other, flag = merge[f]
1701 1712 ret = self.merge3(f, my, other, xp1, xp2)
1702 1713 if ret:
1703 1714 err = True
1704 1715 failedmerge.append(f)
1705 1716 util.set_exec(self.wjoin(f), flag)
1706 1717 if moddirstate:
1707 1718 if branch_merge:
1708 1719 # We've done a branch merge, mark this file as merged
1709 1720 # so that we properly record the merge later
1710 1721 self.dirstate.update([f], 'm')
1711 1722 else:
1712 1723 # We've update-merged a locally modified file, so
1713 1724 # we set the dirstate to emulate a normal checkout
1714 1725 # of that file some time in the past. Thus our
1715 1726 # merge will appear as a normal local file
1716 1727 # modification.
1717 1728 f_len = len(self.file(f).read(other))
1718 1729 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1719 1730
1720 1731 remove.sort()
1721 1732 for f in remove:
1722 1733 self.ui.note(_("removing %s\n") % f)
1723 1734 util.audit_path(f)
1724 1735 try:
1725 1736 util.unlink(self.wjoin(f))
1726 1737 except OSError, inst:
1727 1738 if inst.errno != errno.ENOENT:
1728 1739 self.ui.warn(_("update failed to remove %s: %s!\n") %
1729 1740 (f, inst.strerror))
1730 1741 if moddirstate:
1731 1742 if branch_merge:
1732 1743 self.dirstate.update(remove, 'r')
1733 1744 else:
1734 1745 self.dirstate.forget(remove)
1735 1746
1736 1747 if moddirstate:
1737 1748 self.dirstate.setparents(p1, p2)
1738 1749
1739 1750 stat = ((len(get), _("updated")),
1740 1751 (len(merge) - len(failedmerge), _("merged")),
1741 1752 (len(remove), _("removed")),
1742 1753 (len(failedmerge), _("unresolved")))
1743 1754 note = ", ".join([_("%d files %s") % s for s in stat])
1744 1755 self.ui.note("%s\n" % note)
1745 1756 if moddirstate and branch_merge:
1746 1757 self.ui.note(_("(branch merge, don't forget to commit)\n"))
1747 1758
1748 1759 return err
1749 1760
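The exec-bit handling above leans on a compact three-way bit merge, mode = ((a^b) | (a^c)) ^ a, read as "if we changed or they changed, change in merge". A minimal sketch under illustrative names (not from the patch):

    def merge_exec_bit(a, b, c):
        # a: ancestor's bit, b: working directory's bit, c: remote's bit
        return ((a ^ b) | (a ^ c)) ^ a

    assert merge_exec_bit(0, 1, 0) == 1  # we set it, the merge keeps it
    assert merge_exec_bit(1, 1, 0) == 0  # they cleared it, so does the merge
    assert merge_exec_bit(0, 1, 1) == 1  # both set it, the merge keeps it
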
1750 1761 def merge3(self, fn, my, other, p1, p2):
1751 1762 """perform a 3-way merge in the working directory"""
1752 1763
1753 1764 def temp(prefix, node):
1754 1765 pre = "%s~%s." % (os.path.basename(fn), prefix)
1755 1766 (fd, name) = tempfile.mkstemp("", pre)
1756 1767 f = os.fdopen(fd, "wb")
1757 1768 self.wwrite(fn, fl.read(node), f)
1758 1769 f.close()
1759 1770 return name
1760 1771
1761 1772 fl = self.file(fn)
1762 1773 base = fl.ancestor(my, other)
1763 1774 a = self.wjoin(fn)
1764 1775 b = temp("base", base)
1765 1776 c = temp("other", other)
1766 1777
1767 1778 self.ui.note(_("resolving %s\n") % fn)
1768 1779 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1769 1780 (fn, short(my), short(other), short(base)))
1770 1781
1771 1782 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1772 1783 or "hgmerge")
1773 1784 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c), cwd=self.root,
1774 1785 environ={'HG_FILE': fn,
1775 1786 'HG_MY_NODE': p1,
1776 1787 'HG_OTHER_NODE': p2,
1777 1788 'HG_FILE_MY_NODE': hex(my),
1778 1789 'HG_FILE_OTHER_NODE': hex(other),
1779 1790 'HG_FILE_BASE_NODE': hex(base)})
1780 1791 if r:
1781 1792 self.ui.warn(_("merging %s failed!\n") % fn)
1782 1793
1783 1794 os.unlink(b)
1784 1795 os.unlink(c)
1785 1796 return r
1786 1797
1787 1798 def verify(self):
1788 1799 filelinkrevs = {}
1789 1800 filenodes = {}
1790 1801 changesets = revisions = files = 0
1791 1802 errors = [0]
1792 1803 neededmanifests = {}
1793 1804
1794 1805 def err(msg):
1795 1806 self.ui.warn(msg + "\n")
1796 1807 errors[0] += 1
1797 1808
1798 1809 def checksize(obj, name):
1799 1810 d = obj.checksize()
1800 1811 if d[0]:
1801 1812 err(_("%s data length off by %d bytes") % (name, d[0]))
1802 1813 if d[1]:
1803 1814 err(_("%s index contains %d extra bytes") % (name, d[1]))
1804 1815
1805 1816 seen = {}
1806 1817 self.ui.status(_("checking changesets\n"))
1807 1818 checksize(self.changelog, "changelog")
1808 1819
1809 1820 for i in range(self.changelog.count()):
1810 1821 changesets += 1
1811 1822 n = self.changelog.node(i)
1812 1823 l = self.changelog.linkrev(n)
1813 1824 if l != i:
1814 1825 err(_("incorrect link (%d) for changeset revision %d") % (l, i))
1815 1826 if n in seen:
1816 1827 err(_("duplicate changeset at revision %d") % i)
1817 1828 seen[n] = 1
1818 1829
1819 1830 for p in self.changelog.parents(n):
1820 1831 if p not in self.changelog.nodemap:
1821 1832 err(_("changeset %s has unknown parent %s") %
1822 1833 (short(n), short(p)))
1823 1834 try:
1824 1835 changes = self.changelog.read(n)
1825 1836 except KeyboardInterrupt:
1826 1837 self.ui.warn(_("interrupted"))
1827 1838 raise
1828 1839 except Exception, inst:
1829 1840 err(_("unpacking changeset %s: %s") % (short(n), inst))
1830 1841 continue
1831 1842
1832 1843 neededmanifests[changes[0]] = n
1833 1844
1834 1845 for f in changes[3]:
1835 1846 filelinkrevs.setdefault(f, []).append(i)
1836 1847
1837 1848 seen = {}
1838 1849 self.ui.status(_("checking manifests\n"))
1839 1850 checksize(self.manifest, "manifest")
1840 1851
1841 1852 for i in range(self.manifest.count()):
1842 1853 n = self.manifest.node(i)
1843 1854 l = self.manifest.linkrev(n)
1844 1855
1845 1856 if l < 0 or l >= self.changelog.count():
1846 1857 err(_("bad manifest link (%d) at revision %d") % (l, i))
1847 1858
1848 1859 if n in neededmanifests:
1849 1860 del neededmanifests[n]
1850 1861
1851 1862 if n in seen:
1852 1863 err(_("duplicate manifest at revision %d") % i)
1853 1864
1854 1865 seen[n] = 1
1855 1866
1856 1867 for p in self.manifest.parents(n):
1857 1868 if p not in self.manifest.nodemap:
1858 1869 err(_("manifest %s has unknown parent %s") %
1859 1870 (short(n), short(p)))
1860 1871
1861 1872 try:
1862 1873 delta = mdiff.patchtext(self.manifest.delta(n))
1863 1874 except KeyboardInterrupt:
1864 1875 self.ui.warn(_("interrupted"))
1865 1876 raise
1866 1877 except Exception, inst:
1867 1878 err(_("unpacking manifest %s: %s") % (short(n), inst))
1868 1879 continue
1869 1880
1870 1881 try:
1871 1882 ff = [ l.split('\0') for l in delta.splitlines() ]
1872 1883 for f, fn in ff:
1873 1884 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1874 1885 except (ValueError, TypeError), inst:
1875 1886 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1876 1887
1877 1888 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1878 1889
1879 1890 for m, c in neededmanifests.items():
1880 1891 err(_("changeset %s refers to unknown manifest %s") %
1881 1892 (short(c), short(m)))
1882 1893 del neededmanifests
1883 1894
1884 1895 for f in filenodes:
1885 1896 if f not in filelinkrevs:
1886 1897 err(_("file %s in manifest but not in changesets") % f)
1887 1898
1888 1899 for f in filelinkrevs:
1889 1900 if f not in filenodes:
1890 1901 err(_("file %s in changeset but not in manifest") % f)
1891 1902
1892 1903 self.ui.status(_("checking files\n"))
1893 1904 ff = filenodes.keys()
1894 1905 ff.sort()
1895 1906 for f in ff:
1896 1907 if f == "/dev/null":
1897 1908 continue
1898 1909 files += 1
1899 1910 if not f:
1900 1911 err(_("file without name in manifest %s") % short(n))
1901 1912 continue
1902 1913 fl = self.file(f)
1903 1914 checksize(fl, f)
1904 1915
1905 1916 nodes = {nullid: 1}
1906 1917 seen = {}
1907 1918 for i in range(fl.count()):
1908 1919 revisions += 1
1909 1920 n = fl.node(i)
1910 1921
1911 1922 if n in seen:
1912 1923 err(_("%s: duplicate revision %d") % (f, i))
1913 1924 if n not in filenodes[f]:
1914 1925 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1915 1926 else:
1916 1927 del filenodes[f][n]
1917 1928
1918 1929 flr = fl.linkrev(n)
1919 1930 if flr not in filelinkrevs.get(f, []):
1920 1931 err(_("%s:%s points to unexpected changeset %d")
1921 1932 % (f, short(n), flr))
1922 1933 else:
1923 1934 filelinkrevs[f].remove(flr)
1924 1935
1925 1936 # verify contents
1926 1937 try:
1927 1938 t = fl.read(n)
1928 1939 except KeyboardInterrupt:
1929 1940 self.ui.warn(_("interrupted"))
1930 1941 raise
1931 1942 except Exception, inst:
1932 1943 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1933 1944
1934 1945 # verify parents
1935 1946 (p1, p2) = fl.parents(n)
1936 1947 if p1 not in nodes:
1937 1948 err(_("file %s:%s unknown parent 1 %s") %
1938 1949 (f, short(n), short(p1)))
1939 1950 if p2 not in nodes:
1940 1951 err(_("file %s:%s unknown parent 2 %s") %
1941 1952 (f, short(n), short(p2)))
1942 1953 nodes[n] = 1
1943 1954
1944 1955 # cross-check
1945 1956 for node in filenodes[f]:
1946 1957 err(_("node %s in manifests not in %s") % (hex(node), f))
1947 1958
1948 1959 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1949 1960 (files, changesets, revisions))
1950 1961
1951 1962 if errors[0]:
1952 1963 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1953 1964 return 1
1954 1965
1955 1966 # used to avoid circular references so destructors work
1956 1967 def aftertrans(base):
1957 1968 p = base
1958 1969 def a():
1959 1970 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1960 1971 util.rename(os.path.join(p, "journal.dirstate"),
1961 1972 os.path.join(p, "undo.dirstate"))
1962 1973 return a
1963 1974
@@ -1,962 +1,1062 b''
1 1 """
2 2 revlog.py - storage back-end for mercurial
3 3
4 4 This provides efficient delta storage with O(1) retrieve and append
5 5 and O(changes) merge between branches
6 6
7 7 Copyright 2005 Matt Mackall <mpm@selenic.com>
8 8
9 9 This software may be used and distributed according to the terms
10 10 of the GNU General Public License, incorporated herein by reference.
11 11 """
12 12
13 13 from node import *
14 14 from i18n import gettext as _
15 15 from demandload import demandload
16 16 demandload(globals(), "binascii changegroup errno heapq mdiff os")
17 17 demandload(globals(), "sha struct zlib")
18 18
19 19 # revlog version strings
20 20 REVLOGV0 = 0
21 21 REVLOGNG = 1
22 22
23 # revlog flags
24 REVLOGNGINLINEDATA = (1 << 16)
25
26 def flagstr(flag):
27 if flag == "inline":
28 return REVLOGNGINLINEDATA
29 raise RevlogError(_("unknown revlog flag %s") % flag)
30
23 31 def hash(text, p1, p2):
24 32 """generate a hash from the given text and its parent hashes
25 33
26 34 This hash combines both the current file contents and its history
27 35 in a manner that makes it easy to distinguish nodes with the same
28 36 content in the revision graph.
29 37 """
30 38 l = [p1, p2]
31 39 l.sort()
32 40 s = sha.new(l[0])
33 41 s.update(l[1])
34 42 s.update(text)
35 43 return s.digest()
36 44
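A quick sketch of what hash() yields, using hashlib.sha1 (the modern spelling of the sha module imported above); sorting the parents first makes the node independent of parent order:

    import hashlib

    def node_hash(text, p1, p2):
        l = [p1, p2]
        l.sort()
        s = hashlib.sha1(l[0])
        s.update(l[1])
        s.update(text)
        return s.digest()           # a 20-byte node, as elsewhere in revlog

    # swapping the parents yields the same node
    assert node_hash("data", "\1" * 20, "\2" * 20) == \
           node_hash("data", "\2" * 20, "\1" * 20)
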
37 45 def compress(text):
38 46 """ generate a possibly-compressed representation of text """
39 47 if not text: return ("", text)
40 48 if len(text) < 44:
41 49 if text[0] == '\0': return ("", text)
42 50 return ('u', text)
43 51 bin = zlib.compress(text)
44 52 if len(bin) > len(text):
45 53 if text[0] == '\0': return ("", text)
46 54 return ('u', text)
47 55 return ("", bin)
48 56
49 57 def decompress(bin):
50 58 """ decompress the given input """
51 59 if not bin: return bin
52 60 t = bin[0]
53 61 if t == '\0': return bin
54 62 if t == 'x': return zlib.decompress(bin)
55 63 if t == 'u': return bin[1:]
56 64 raise RevlogError(_("unknown compression type %r") % t)
57 65
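A quick illustration of the chunk encoding compress() and decompress() agree on: zlib output (which begins with 'x') is stored as-is, text that would not shrink is prefixed with 'u', and chunks beginning with '\0' pass through untouched. A sketch reusing the decompress() above:

    import zlib

    text = "a revision body long enough for zlib to be worthwhile " * 4
    assert decompress(zlib.compress(text)) == text   # 'x': a zlib stream
    assert decompress("u" + text) == text            # 'u': stored verbatim
    assert decompress("") == ""                      # empty chunks round-trip
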
58 66 indexformatv0 = ">4l20s20s20s"
59 67 # index ng:
60 68 # 6 bytes offset
61 69 # 2 bytes flags
62 70 # 4 bytes compressed length
63 71 # 4 bytes uncompressed length
64 72 # 4 bytes: base rev
65 73 # 4 bytes link rev
66 74 # 4 bytes parent 1 rev
67 75 # 4 bytes parent 2 rev
68 76 # 32 bytes: nodeid
69 77 indexformatng = ">Qiiiiii20s12x"
70 78 versionformat = ">i"
71 79
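As a sketch of the RevlogNG record described in the comment above, packing one entry with indexformatng gives the fixed 64-byte record the parser slices on (the field values here are made up for illustration):

    import struct

    indexformatng = ">Qiiiiii20s12x"
    e = ((0 << 16) | 0,   # 48-bit data offset packed with 16 bits of flags
         11,              # compressed chunk length
         11,              # uncompressed text length
         0,               # base rev of the delta chain
         0,               # link rev
         -1, -1,          # parent 1 and parent 2 revs (-1 means nullid)
         "\0" * 20)       # nodeid, padded to 32 bytes by the trailing 12x
    assert struct.calcsize(indexformatng) == 64
    assert struct.unpack(indexformatng, struct.pack(indexformatng, *e)) == e
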
72 80 class lazyparser(object):
73 81 """
74 82 this class avoids the need to parse the entirety of large indices
75 83
76 84 By default we parse and load 1000 entries at a time.
77 85
78 86 If no position is specified, we load the whole index, and replace
79 87 the lazy objects in revlog with the underlying objects for
80 88 efficiency in cases where we look at most of the nodes.
81 89 """
82 90 def __init__(self, data, revlog, indexformat):
83 91 self.data = data
84 92 self.s = struct.calcsize(indexformat)
85 93 self.indexformat = indexformat
86 94 self.l = len(data)/self.s
87 95 self.index = [None] * self.l
88 96 self.map = {nullid: -1}
89 97 self.all = 0
90 98 self.revlog = revlog
91 99
92 100 def load(self, pos=None):
93 101 if self.all: return
94 102 if pos is not None:
95 103 block = pos / 1000
96 104 i = block * 1000
97 105 end = min(self.l, i + 1000)
98 106 else:
99 107 self.all = 1
100 108 i = 0
101 109 end = self.l
102 110 self.revlog.index = self.index
103 111 self.revlog.nodemap = self.map
104 112
105 113 while i < end:
106 114 if not self.index[i]:
107 115 d = self.data[i * self.s: (i + 1) * self.s]
108 116 e = struct.unpack(self.indexformat, d)
109 117 self.index[i] = e
110 118 self.map[e[-1]] = i
111 119 i += 1
112 120
113 121 class lazyindex(object):
114 122 """a lazy version of the index array"""
115 123 def __init__(self, parser):
116 124 self.p = parser
117 125 def __len__(self):
118 126 return len(self.p.index)
119 127 def load(self, pos):
120 128 if pos < 0:
121 129 pos += len(self.p.index)
122 130 self.p.load(pos)
123 131 return self.p.index[pos]
124 132 def __getitem__(self, pos):
125 133 return self.p.index[pos] or self.load(pos)
126 134 def __setitem__(self, pos, item):
127 135 self.p.index[pos] = item
128 136 def __delitem__(self, pos):
129 137 del self.p.index[pos]
130 138 def append(self, e):
131 139 self.p.index.append(e)
132 140
133 141 class lazymap(object):
134 142 """a lazy version of the node map"""
135 143 def __init__(self, parser):
136 144 self.p = parser
137 145 def load(self, key):
138 146 if self.p.all: return
139 147 n = self.p.data.find(key)
140 148 if n < 0:
141 149 raise KeyError(key)
142 150 pos = n / self.p.s
143 151 self.p.load(pos)
144 152 def __contains__(self, key):
145 153 self.p.load()
146 154 return key in self.p.map
147 155 def __iter__(self):
148 156 yield nullid
149 157 for i in xrange(self.p.l):
150 158 try:
151 159 yield self.p.index[i][-1]
152 160 except:
153 161 self.p.load(i)
154 162 yield self.p.index[i][-1]
155 163 def __getitem__(self, key):
156 164 try:
157 165 return self.p.map[key]
158 166 except KeyError:
159 167 try:
160 168 self.load(key)
161 169 return self.p.map[key]
162 170 except KeyError:
163 171 raise KeyError("node " + hex(key))
164 172 def __setitem__(self, key, val):
165 173 self.p.map[key] = val
166 174 def __delitem__(self, key):
167 175 del self.p.map[key]
168 176
169 177 class RevlogError(Exception): pass
170 178
171 179 class revlog(object):
172 180 """
173 181 the underlying revision storage object
174 182
175 183 A revlog consists of two parts, an index and the revision data.
176 184
177 185 The index is a file with a fixed record size containing
178 186 information on each revision, including its nodeid (hash), the
179 187 nodeids of its parents, the position and offset of its data within
180 188 the data file, and the revision it's based on. Finally, each entry
181 189 contains a linkrev entry that can serve as a pointer to external
182 190 data.
183 191
184 192 The revision data itself is a linear collection of data chunks.
185 193 Each chunk represents a revision and is usually represented as a
186 194 delta against the previous chunk. To bound lookup time, runs of
187 195 deltas are limited to about 2 times the length of the original
188 196 version data. This makes retrieval of a version proportional to
189 197 its size, or O(1) relative to the number of revisions.
190 198
191 199 Both pieces of the revlog are written to in an append-only
192 200 fashion, which means we never need to rewrite a file to insert or
193 201 remove data, and can use some simple techniques to avoid the need
194 202 for locking while reading.
195 203 """
196 204 def __init__(self, opener, indexfile, datafile, defversion=0):
197 205 """
198 206 create a revlog object
199 207
200 208 opener is a function that abstracts the file opening operation
201 209 and can be used to implement COW semantics or the like.
202 210 """
203 211 self.indexfile = indexfile
204 212 self.datafile = datafile
205 213 self.opener = opener
206 214
207 215 self.indexstat = None
208 216 self.cache = None
209 217 self.chunkcache = None
210 218 self.defversion = defversion
211 219 self.load()
212 220
213 221 def load(self):
214 222 v = self.defversion
215 223 try:
216 224 f = self.opener(self.indexfile)
217 225 i = f.read()
218 226 except IOError, inst:
219 227 if inst.errno != errno.ENOENT:
220 228 raise
221 229 i = ""
222 230 else:
223 231 try:
224 232 st = os.fstat(f.fileno())
225 233 except AttributeError, inst:
226 234 st = None
227 235 else:
228 236 oldst = self.indexstat
229 237 if (oldst and st.st_dev == oldst.st_dev
230 238 and st.st_ino == oldst.st_ino
231 239 and st.st_mtime == oldst.st_mtime
232 240 and st.st_ctime == oldst.st_ctime):
233 241 return
234 242 self.indexstat = st
235 243 if len(i) > 0:
236 244 v = struct.unpack(versionformat, i[:4])[0]
237 if v != 0:
238 245 flags = v & ~0xFFFF
239 246 fmt = v & 0xFFFF
240 if fmt != REVLOGNG or (flags & ~(REVLOGNGINLINEDATA)):
241 raise RevlogError(
242 _("unknown version format %d or flags %x on %s") %
243 (v, flags, self.indexfile))
247 if fmt == 0:
248 if flags:
249 raise RevlogError(_("index %s invalid flags %x for format v0") %
250 (self.indexfile, flags))
251 elif fmt == REVLOGNG:
252 if flags & ~REVLOGNGINLINEDATA:
253 raise RevlogError(_("index %s invalid flags %x for revlogng") %
254 (self.indexfile, flags))
255 else:
256 raise RevlogError(_("index %s invalid format %d") %
257 (self.indexfile, fmt))
244 258 self.version = v
245 259 if v == 0:
246 260 self.indexformat = indexformatv0
247 261 else:
248 262 self.indexformat = indexformatng
249 263
250 264 if i:
251 if st and st.st_size > 10000:
265 if not self.inlinedata() and st and st.st_size > 10000:
252 266 # big index, let's parse it on demand
253 267 parser = lazyparser(i, self, self.indexformat)
254 268 self.index = lazyindex(parser)
255 269 self.nodemap = lazymap(parser)
256 270 else:
257 271 self.parseindex(i)
272 if self.inlinedata():
273 # we've already got the entire data file read in, save it
274 # in the chunk data
275 self.chunkcache = (0, i)
258 276 if self.version != 0:
259 277 e = list(self.index[0])
260 278 type = self.ngtype(e[0])
261 279 e[0] = self.offset_type(0, type)
262 280 self.index[0] = e
263 281 else:
264 282 self.nodemap = { nullid: -1}
265 283 self.index = []
266 284
267 285
268 286 def parseindex(self, data):
269 287 s = struct.calcsize(self.indexformat)
270 288 l = len(data)
271 289 self.index = []
272 290 self.nodemap = {nullid: -1}
291 inline = self.inlinedata()
273 292 off = 0
274 293 n = 0
275 294 while off < l:
276 295 e = struct.unpack(self.indexformat, data[off:off + s])
277 296 self.index.append(e)
278 297 self.nodemap[e[-1]] = n
279 298 n += 1
280 299 off += s
300 if inline:
301 off += e[1]
281 302
282 303 def ngoffset(self, q):
283 304 if q & 0xFFFF:
284 305 raise RevlogError(_('%s: incompatible revision flag %x') %
285 306 (self.indexfile, type))
286 307 return long(q >> 16)
287 308
288 309 def ngtype(self, q):
289 310 return int(q & 0xFFFF)
290 311
291 312 def offset_type(self, offset, type):
292 313 return long(long(offset) << 16 | type)
293 314
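These three helpers pack the data-file offset and the per-revision type flags into the single 64-bit field stored at index position 0. A sketch of the round trip:

    q = long(4096) << 16 | 0       # offset_type(4096, 0)
    assert int(q & 0xFFFF) == 0    # ngtype: the low 16 bits are the flags
    assert long(q >> 16) == 4096   # ngoffset: the high 48 bits are the offset
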
294 315 def loadindexmap(self):
295 316 """loads both the map and the index from the lazy parser"""
296 317 if isinstance(self.index, lazyindex):
297 318 p = self.index.p
298 319 p.load()
299 320
321 def inlinedata(self): return self.version & REVLOGNGINLINEDATA
300 322 def tip(self): return self.node(len(self.index) - 1)
301 323 def count(self): return len(self.index)
302 324 def node(self, rev):
303 325 return (rev < 0) and nullid or self.index[rev][-1]
304 326 def rev(self, node):
305 327 try:
306 328 return self.nodemap[node]
307 329 except KeyError:
308 330 raise RevlogError(_('%s: no node %s') % (self.indexfile, hex(node)))
309 331 def linkrev(self, node): return self.index[self.rev(node)][-4]
310 332 def parents(self, node):
311 333 if node == nullid: return (nullid, nullid)
312 334 r = self.rev(node)
313 335 d = self.index[r][-3:-1]
314 336 if self.version == 0:
315 337 return d
316 338 return [ self.node(x) for x in d ]
317 339 def start(self, rev):
318 340 if rev < 0:
319 341 return -1
320 342 if self.version != 0:
321 343 return self.ngoffset(self.index[rev][0])
322 344 return self.index[rev][0]
323 345 def end(self, rev): return self.start(rev) + self.length(rev)
324 346
325 347 def length(self, rev):
326 348 if rev < 0:
327 349 return 0
328 350 else:
329 351 return self.index[rev][1]
330 352 def base(self, rev): return (rev < 0) and rev or self.index[rev][-5]
331 353
332 354 def reachable(self, rev, stop=None):
333 355 reachable = {}
334 356 visit = [rev]
335 357 reachable[rev] = 1
336 358 if stop:
337 359 stopn = self.rev(stop)
338 360 else:
339 361 stopn = 0
340 362 while visit:
341 363 n = visit.pop(0)
342 364 if n == stop:
343 365 continue
344 366 if n == nullid:
345 367 continue
346 368 for p in self.parents(n):
347 369 if self.rev(p) < stopn:
348 370 continue
349 371 if p not in reachable:
350 372 reachable[p] = 1
351 373 visit.append(p)
352 374 return reachable
353 375
354 376 def nodesbetween(self, roots=None, heads=None):
355 377 """Return a tuple containing three elements. Elements 1 and 2 contain
356 378 final lists of bases and heads after all the unreachable ones have
357 379 been pruned. Element 0 contains a topologically sorted list of all
358 380 nodes that satisfy these constraints:
359 381
360 382 1. All nodes must be descended from a node in roots (the nodes on
361 383 roots are considered descended from themselves).
362 384 2. All nodes must also be ancestors of a node in heads (the nodes in
363 385 heads are considered to be their own ancestors).
364 386
365 387 If roots is unspecified, nullid is assumed as the only root.
366 388 If heads is unspecified, it is taken to be the output of the
367 389 heads method (i.e. a list of all nodes in the repository that
368 390 have no children)."""
369 391 nonodes = ([], [], [])
370 392 if roots is not None:
371 393 roots = list(roots)
372 394 if not roots:
373 395 return nonodes
374 396 lowestrev = min([self.rev(n) for n in roots])
375 397 else:
376 398 roots = [nullid] # Everybody's a descendent of nullid
377 399 lowestrev = -1
378 400 if (lowestrev == -1) and (heads is None):
379 401 # We want _all_ the nodes!
380 402 return ([self.node(r) for r in xrange(0, self.count())],
381 403 [nullid], list(self.heads()))
382 404 if heads is None:
383 405 # All nodes are ancestors, so the latest ancestor is the last
384 406 # node.
385 407 highestrev = self.count() - 1
386 408 # Set ancestors to None to signal that every node is an ancestor.
387 409 ancestors = None
388 410 # Set heads to an empty dictionary for later discovery of heads
389 411 heads = {}
390 412 else:
391 413 heads = list(heads)
392 414 if not heads:
393 415 return nonodes
394 416 ancestors = {}
395 417 # Start at the top and keep marking parents until we're done.
396 418 nodestotag = heads[:]
397 419 # Turn heads into a dictionary so we can remove 'fake' heads.
398 420 # Also, later we will be using it to filter out the heads we can't
399 421 # find from roots.
400 422 heads = dict.fromkeys(heads, 0)
401 423 # Remember where the top was so we can use it as a limit later.
402 424 highestrev = max([self.rev(n) for n in nodestotag])
403 425 while nodestotag:
404 426 # grab a node to tag
405 427 n = nodestotag.pop()
406 428 # Never tag nullid
407 429 if n == nullid:
408 430 continue
409 431 # A node's revision number represents its place in a
410 432 # topologically sorted list of nodes.
411 433 r = self.rev(n)
412 434 if r >= lowestrev:
413 435 if n not in ancestors:
414 436 # If we are possibly a descendent of one of the roots
415 437 # and we haven't already been marked as an ancestor
416 438 ancestors[n] = 1 # Mark as ancestor
417 439 # Add non-nullid parents to list of nodes to tag.
418 440 nodestotag.extend([p for p in self.parents(n) if
419 441 p != nullid])
420 442 elif n in heads: # We've seen it before, is it a fake head?
421 443 # So it is, real heads should not be the ancestors of
422 444 # any other heads.
423 445 heads.pop(n)
424 446 if not ancestors:
425 447 return nonodes
426 448 # Now that we have our set of ancestors, we want to remove any
427 449 # roots that are not ancestors.
428 450
429 451 # If one of the roots was nullid, everything is included anyway.
430 452 if lowestrev > -1:
431 453 # But, since we weren't, let's recompute the lowest rev to not
432 454 # include roots that aren't ancestors.
433 455
434 456 # Filter out roots that aren't ancestors of heads
435 457 roots = [n for n in roots if n in ancestors]
436 458 # Recompute the lowest revision
437 459 if roots:
438 460 lowestrev = min([self.rev(n) for n in roots])
439 461 else:
440 462 # No more roots? Return empty list
441 463 return nonodes
442 464 else:
443 465 # We are descending from nullid, and don't need to care about
444 466 # any other roots.
445 467 lowestrev = -1
446 468 roots = [nullid]
447 469 # Transform our roots list into a 'set' (i.e. a dictionary where the
448 470 # values don't matter).
449 471 descendents = dict.fromkeys(roots, 1)
450 472 # Also, keep the original roots so we can filter out roots that aren't
451 473 # 'real' roots (i.e. are descended from other roots).
452 474 roots = descendents.copy()
453 475 # Our topologically sorted list of output nodes.
454 476 orderedout = []
455 477 # Don't start at nullid since we don't want nullid in our output list,
456 478 # and if nullid shows up in descendents, empty parents will look like
457 479 # they're descendents.
458 480 for r in xrange(max(lowestrev, 0), highestrev + 1):
459 481 n = self.node(r)
460 482 isdescendent = False
461 483 if lowestrev == -1: # Everybody is a descendent of nullid
462 484 isdescendent = True
463 485 elif n in descendents:
464 486 # n is already a descendent
465 487 isdescendent = True
466 488 # This check only needs to be done here because all the roots
467 489 # will start being marked as descendents before the loop.
468 490 if n in roots:
469 491 # If n was a root, check if it's a 'real' root.
470 492 p = tuple(self.parents(n))
471 493 # If any of its parents are descendents, it's not a root.
472 494 if (p[0] in descendents) or (p[1] in descendents):
473 495 roots.pop(n)
474 496 else:
475 497 p = tuple(self.parents(n))
476 498 # A node is a descendent if either of its parents are
477 499 # descendents. (We seeded the descendents list with the roots
478 500 # up there, remember?)
479 501 if (p[0] in descendents) or (p[1] in descendents):
480 502 descendents[n] = 1
481 503 isdescendent = True
482 504 if isdescendent and ((ancestors is None) or (n in ancestors)):
483 505 # Only include nodes that are both descendents and ancestors.
484 506 orderedout.append(n)
485 507 if (ancestors is not None) and (n in heads):
486 508 # We're trying to figure out which heads are reachable
487 509 # from roots.
488 510 # Mark this head as having been reached
489 511 heads[n] = 1
490 512 elif ancestors is None:
491 513 # Otherwise, we're trying to discover the heads.
492 514 # Assume this is a head because if it isn't, the next step
493 515 # will eventually remove it.
494 516 heads[n] = 1
495 517 # But, obviously its parents aren't.
496 518 for p in self.parents(n):
497 519 heads.pop(p, None)
498 520 heads = [n for n in heads.iterkeys() if heads[n] != 0]
499 521 roots = roots.keys()
500 522 assert orderedout
501 523 assert roots
502 524 assert heads
503 525 return (orderedout, roots, heads)
504 526
505 527 def heads(self, start=None):
506 528 """return the list of all nodes that have no children
507 529
508 530 if start is specified, only heads that are descendants of
509 531 start will be returned
510 532
511 533 """
512 534 if start is None:
513 535 start = nullid
514 536 reachable = {start: 1}
515 537 heads = {start: 1}
516 538 startrev = self.rev(start)
517 539
518 540 for r in xrange(startrev + 1, self.count()):
519 541 n = self.node(r)
520 542 for pn in self.parents(n):
521 543 if pn in reachable:
522 544 reachable[n] = 1
523 545 heads[n] = 1
524 546 if pn in heads:
525 547 del heads[pn]
526 548 return heads.keys()
527 549
528 550 def children(self, node):
529 551 """find the children of a given node"""
530 552 c = []
531 553 p = self.rev(node)
532 554 for r in range(p + 1, self.count()):
533 555 n = self.node(r)
534 556 for pn in self.parents(n):
535 557 if pn == node:
536 558 c.append(n)
537 559 continue
538 560 elif pn == nullid:
539 561 continue
540 562 return c
541 563
542 564 def lookup(self, id):
543 565 """locate a node based on revision number or subset of hex nodeid"""
544 566 try:
545 567 rev = int(id)
546 568 if str(rev) != id: raise ValueError
547 569 if rev < 0: rev = self.count() + rev
548 570 if rev < 0 or rev >= self.count(): raise ValueError
549 571 return self.node(rev)
550 572 except (ValueError, OverflowError):
551 573 c = []
552 574 for n in self.nodemap:
553 575 if hex(n).startswith(id):
554 576 c.append(n)
555 577 if len(c) > 1: raise RevlogError(_("Ambiguous identifier"))
556 578 if len(c) < 1: raise RevlogError(_("No match found"))
557 579 return c[0]
558 580
559 581 return None
560 582
561 583 def diff(self, a, b):
562 584 """return a delta between two revisions"""
563 585 return mdiff.textdiff(a, b)
564 586
565 587 def patches(self, t, pl):
566 588 """apply a list of patches to a string"""
567 589 return mdiff.patches(t, pl)
568 590
569 591 def chunk(self, rev, df=None, cachelen=4096):
570 592 start, length = self.start(rev), self.length(rev)
593 inline = self.inlinedata()
594 if inline:
595 start += (rev + 1) * struct.calcsize(self.indexformat)
571 596 end = start + length
572 597 def loadcache(df):
573 598 cache_length = max(cachelen, length) # 4k
574 599 if not df:
600 if inline:
601 df = self.opener(self.indexfile)
602 else:
575 603 df = self.opener(self.datafile)
576 604 df.seek(start)
577 605 self.chunkcache = (start, df.read(cache_length))
578 606
579 607 if not self.chunkcache:
580 608 loadcache(df)
581 609
582 610 cache_start = self.chunkcache[0]
583 611 cache_end = cache_start + len(self.chunkcache[1])
584 612 if start >= cache_start and end <= cache_end:
585 613 # it is cached
586 614 offset = start - cache_start
587 615 else:
588 616 loadcache(df)
589 617 offset = 0
590 618
591 619 #def checkchunk():
592 620 # df = self.opener(self.datafile)
593 621 # df.seek(start)
594 622 # return df.read(length)
595 623 #assert s == checkchunk()
596 624 return decompress(self.chunkcache[1][offset:offset + length])
597 625
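The inline adjustment at the top of chunk() is the heart of this patch: with REVLOGNGINLINEDATA set, revision r's data chunk sits in the index file immediately after index entry r, so the logical data offset is shifted by the r + 1 fixed-size entries written before it. A sketch of that arithmetic:

    import struct

    def inline_start(logical_start, rev, indexformat=">Qiiiiii20s12x"):
        # rev's chunk follows the (rev + 1) index records that precede it
        return logical_start + (rev + 1) * struct.calcsize(indexformat)

    assert inline_start(0, 0) == 64    # rev 0 starts right after entry 0
    assert inline_start(11, 1) == 139  # 11 data bytes plus two 64-byte entries
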
598 626 def delta(self, node):
599 627 """return or calculate a delta between a node and its predecessor"""
600 628 r = self.rev(node)
601 629 return self.revdiff(r - 1, r)
602 630
603 631 def revdiff(self, rev1, rev2):
604 632 """return or calculate a delta between two revisions"""
605 633 b1 = self.base(rev1)
606 634 b2 = self.base(rev2)
607 635 if b1 == b2 and rev1 + 1 == rev2:
608 636 return self.chunk(rev2)
609 637 else:
610 638 return self.diff(self.revision(self.node(rev1)),
611 639 self.revision(self.node(rev2)))
612 640
613 641 def revision(self, node):
614 642 """return an uncompressed revision of a given"""
615 643 if node == nullid: return ""
616 644 if self.cache and self.cache[0] == node: return self.cache[2]
617 645
618 646 # look up what we need to read
619 647 text = None
620 648 rev = self.rev(node)
621 649 base = self.base(rev)
622 650
651 if self.inlinedata():
652 # we probably have the whole chunk cached
653 df = None
654 else:
623 655 df = self.opener(self.datafile)
624 656
625 657 # do we have useful data cached?
626 658 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
627 659 base = self.cache[1]
628 660 text = self.cache[2]
629 661 else:
630 662 text = self.chunk(base, df=df)
631 663
632 664 bins = []
633 665 for r in xrange(base + 1, rev + 1):
634 666 bins.append(self.chunk(r, df=df))
635 667
636 668 text = self.patches(text, bins)
637 669
638 670 p1, p2 = self.parents(node)
639 671 if node != hash(text, p1, p2):
640 672 raise RevlogError(_("integrity check failed on %s:%d")
641 673 % (self.datafile, rev))
642 674
643 675 self.cache = (node, rev, text)
644 676 return text
645 677
678 def checkinlinesize(self, fp, tr):
679 if not self.inlinedata():
680 return
681 size = fp.tell()
682 if size < 131072:
683 return
684 tr.add(self.datafile, 0)
685 df = self.opener(self.datafile, 'w')
686 calc = struct.calcsize(self.indexformat)
687 for r in xrange(self.count()):
688 start = self.start(r) + (r + 1) * calc
689 length = self.length(r)
690 fp.seek(start)
691 d = fp.read(length)
692 df.write(d)
693 fp.close()
694 df.close()
695 fp = self.opener(self.indexfile, 'w', atomic=True)
696 self.version &= ~(REVLOGNGINLINEDATA)
697 if self.count():
698 x = self.index[0]
699 e = struct.pack(self.indexformat, *x)[4:]
700 l = struct.pack(versionformat, self.version)
701 fp.write(l)
702 fp.write(e)
703
704 for i in xrange(1, self.count()):
705 x = self.index[i]
706 e = struct.pack(self.indexformat, *x)
707 fp.write(e)
708
709 fp.close()
710 self.chunkcache = None
711
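checkinlinesize() is the escape hatch for the inline format: once the interleaved file passes 128KB (131072 bytes), the data chunks are copied out to the separate data file and the inline flag is cleared from the version word. A sketch of the threshold and the flag flip:

    REVLOGNG = 1
    REVLOGNGINLINEDATA = 1 << 16

    def should_split(index_size, limit=131072):
        # checkinlinesize() returns early below the limit, splits otherwise
        return index_size >= limit

    version = REVLOGNG | REVLOGNGINLINEDATA
    version &= ~REVLOGNGINLINEDATA   # what checkinlinesize does before rewriting
    assert version == REVLOGNG
    assert should_split(131072) and not should_split(131071)
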
646 712 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
647 713 """add a revision to the log
648 714
649 715 text - the revision data to add
650 716 transaction - the transaction object used for rollback
651 717 link - the linkrev data to add
652 718 p1, p2 - the parent nodeids of the revision
653 719 d - an optional precomputed delta
654 720 """
655 721 if text is None: text = ""
656 722 if p1 is None: p1 = self.tip()
657 723 if p2 is None: p2 = nullid
658 724
659 725 node = hash(text, p1, p2)
660 726
661 727 if node in self.nodemap:
662 728 return node
663 729
664 730 n = self.count()
665 731 t = n - 1
666 732
667 733 if n:
668 734 base = self.base(t)
669 735 start = self.start(base)
670 736 end = self.end(t)
671 737 if not d:
672 738 prev = self.revision(self.tip())
673 739 d = self.diff(prev, str(text))
674 740 data = compress(d)
675 741 l = len(data[1]) + len(data[0])
676 742 dist = end - start + l
677 743
678 744 # full versions are inserted when the needed deltas
679 745 # become comparable to the uncompressed text
680 746 if not n or dist > len(text) * 2:
681 747 data = compress(text)
682 748 l = len(data[1]) + len(data[0])
683 749 base = n
684 750 else:
685 751 base = self.base(t)
686 752
687 753 offset = 0
688 754 if t >= 0:
689 755 offset = self.end(t)
690 756
691 757 if self.version == 0:
692 758 e = (offset, l, base, link, p1, p2, node)
693 759 else:
694 760 e = (self.offset_type(offset, 0), l, len(text),
695 761 base, link, self.rev(p1), self.rev(p2), node)
696 762
697 763 self.index.append(e)
698 764 self.nodemap[node] = n
699 765 entry = struct.pack(self.indexformat, *e)
700 766
767 if not self.inlinedata():
701 768 transaction.add(self.datafile, offset)
702 769 transaction.add(self.indexfile, n * len(entry))
703 770 f = self.opener(self.datafile, "a")
704 771 if data[0]:
705 772 f.write(data[0])
706 773 f.write(data[1])
707 774 f = self.opener(self.indexfile, "a")
775 else:
776 f = self.opener(self.indexfile, "a+")
777 transaction.add(self.indexfile, f.tell())
708 778
709 779 if len(self.index) == 1 and self.version != 0:
710 780 l = struct.pack(versionformat, self.version)
711 781 f.write(l)
712 782 entry = entry[4:]
713 783
714 784 f.write(entry)
715 785
786 if self.inlinedata():
787 f.write(data[0])
788 f.write(data[1])
789 self.checkinlinesize(f, transaction)
790
716 791 self.cache = (node, n, text)
717 792 return node
718 793
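The snapshot heuristic in addrevision(), "full versions are inserted when the needed deltas become comparable to the uncompressed text", can be sketched as a standalone predicate (illustrative names, not from the patch):

    def needs_snapshot(chain_start, chain_end, delta_len, text_len):
        # store a full version once replaying the delta chain back to the
        # last base would cost more than twice the plain text
        return chain_end - chain_start + delta_len > 2 * text_len

    assert needs_snapshot(0, 1900, 300, 1000)    # long chain: take a snapshot
    assert not needs_snapshot(0, 100, 50, 1000)  # cheap chain: keep the delta
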
719 794 def ancestor(self, a, b):
720 795 """calculate the least common ancestor of nodes a and b"""
721 796 # calculate the distance of every node from root
722 797 dist = {nullid: 0}
723 798 for i in xrange(self.count()):
724 799 n = self.node(i)
725 800 p1, p2 = self.parents(n)
726 801 dist[n] = max(dist[p1], dist[p2]) + 1
727 802
728 803 # traverse ancestors in order of decreasing distance from root
729 804 def ancestors(node):
730 805 # we store negative distances because heap returns smallest member
731 806 h = [(-dist[node], node)]
732 807 seen = {}
733 808 while h:
734 809 d, n = heapq.heappop(h)
735 810 if n not in seen:
736 811 seen[n] = 1
737 812 yield (-d, n)
738 813 for p in self.parents(n):
739 814 heapq.heappush(h, (-dist[p], p))
740 815
741 816 def generations(node):
742 817 sg, s = None, {}
743 818 for g,n in ancestors(node):
744 819 if g != sg:
745 820 if sg:
746 821 yield sg, s
747 822 sg, s = g, {n:1}
748 823 else:
749 824 s[n] = 1
750 825 yield sg, s
751 826
752 827 x = generations(a)
753 828 y = generations(b)
754 829 gx = x.next()
755 830 gy = y.next()
756 831
757 832 # increment each ancestor list until it is closer to root than
758 833 # the other, or they match
759 834 while 1:
760 835 #print "ancestor gen %s %s" % (gx[0], gy[0])
761 836 if gx[0] == gy[0]:
762 837 # find the intersection
763 838 i = [ n for n in gx[1] if n in gy[1] ]
764 839 if i:
765 840 return i[0]
766 841 else:
767 842 #print "next"
768 843 gy = y.next()
769 844 gx = x.next()
770 845 elif gx[0] < gy[0]:
771 846 #print "next y"
772 847 gy = y.next()
773 848 else:
774 849 #print "next x"
775 850 gx = x.next()
776 851
777 852 def group(self, nodelist, lookup, infocollect=None):
778 853 """calculate a delta group
779 854
780 855 Given a list of changeset revs, return a set of deltas and
781 856 metadata corresponding to nodes. the first delta is
782 857 parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
783 858 have this parent as it has all history before these
784 859 changesets. parent is parent[0]
785 860 """
786 861 revs = [self.rev(n) for n in nodelist]
787 862
788 863 # if we don't have any revisions touched by these changesets, bail
789 864 if not revs:
790 865 yield changegroup.closechunk()
791 866 return
792 867
793 868 # add the parent of the first rev
794 869 p = self.parents(self.node(revs[0]))[0]
795 870 revs.insert(0, self.rev(p))
796 871
797 872 # build deltas
798 873 for d in xrange(0, len(revs) - 1):
799 874 a, b = revs[d], revs[d + 1]
800 875 nb = self.node(b)
801 876
802 877 if infocollect is not None:
803 878 infocollect(nb)
804 879
805 880 d = self.revdiff(a, b)
806 881 p = self.parents(nb)
807 882 meta = nb + p[0] + p[1] + lookup(nb)
808 883 yield changegroup.genchunk("%s%s" % (meta, d))
809 884
810 885 yield changegroup.closechunk()
811 886
812 887 def addgroup(self, revs, linkmapper, transaction, unique=0):
813 888 """
814 889 add a delta group
815 890
816 891 given a set of deltas, add them to the revision log. the
817 892 first delta is against its parent, which should be in our
818 893 log, the rest are against the previous delta.
819 894 """
820 895
821 896 #track the base of the current delta log
822 897 r = self.count()
823 898 t = r - 1
824 899 node = None
825 900
826 901 base = prev = -1
827 902 start = end = measure = 0
828 903 if r:
829 904 end = self.end(t)
830 905
831 906 ifh = self.opener(self.indexfile, "a+")
832 907 transaction.add(self.indexfile, ifh.tell())
908 if self.inlinedata():
909 dfh = None
910 else:
833 911 transaction.add(self.datafile, end)
834 912 dfh = self.opener(self.datafile, "a")
835 913
836 914 # loop through our set of deltas
837 915 chain = None
838 916 for chunk in revs:
839 917 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
840 918 link = linkmapper(cs)
841 919 if node in self.nodemap:
842 920 # this can happen if two branches make the same change
843 921 # if unique:
844 922 # raise RevlogError(_("already have %s") % hex(node[:4]))
845 923 chain = node
846 924 continue
847 925 delta = chunk[80:]
848 926
849 927 for p in (p1, p2):
850 928 if not p in self.nodemap:
851 929 raise RevlogError(_("unknown parent %s") % short(p))
852 930
853 931 if not chain:
854 932 # retrieve the parent revision of the delta chain
855 933 chain = p1
856 934 if not chain in self.nodemap:
857 935 raise RevlogError(_("unknown base %s") % short(chain))
858 936
859 937 # full versions are inserted when the needed deltas become
860 938 # comparable to the uncompressed text or when the previous
861 939 # version is not the one we have a delta against. We use
862 940 # the size of the previous full rev as a proxy for the
863 941 # current size.
864 942
865 943 if chain == prev:
866 944 tempd = compress(delta)
867 945 cdelta = tempd[0] + tempd[1]
868 946
869 947 if chain != prev or (end - start + len(cdelta)) > measure * 2:
870 948 # flush our writes here so we can read it in revision
871 949 if dfh:
872 950 dfh.flush()
873 951 ifh.flush()
874 952 text = self.revision(chain)
875 953 text = self.patches(text, [delta])
876 954 chk = self.addrevision(text, transaction, link, p1, p2)
877 955 if chk != node:
878 956 raise RevlogError(_("consistency error adding group"))
879 957 measure = len(text)
880 958 else:
881 959 if self.version == 0:
882 960 e = (end, len(cdelta), base, link, p1, p2, node)
883 961 else:
884 962 e = (self.offset_type(end, 0), len(cdelta), -1, base,
885 963 link, self.rev(p1), self.rev(p2), node)
886 964 self.index.append(e)
887 965 self.nodemap[node] = r
966 if self.inlinedata():
967 ifh.write(struct.pack(self.indexformat, *e))
968 ifh.write(cdelta)
969 self.checkinlinesize(ifh, transaction)
970 if not self.inlinedata():
971 dfh = self.opener(self.datafile, "a")
972 ifh = self.opener(self.indexfile, "a")
973 else:
974 if not dfh:
975 # addrevision switched from inline to conventional
976 # reopen the data and index files
977 dfh = self.opener(self.datafile, "a")
978 ifh = self.opener(self.indexfile, "a")
888 979 dfh.write(cdelta)
889 980 ifh.write(struct.pack(self.indexformat, *e))
890 981
891 982 t, r, chain, prev = r, r + 1, node, node
892 983 base = self.base(t)
893 984 start = self.start(base)
894 985 end = self.end(t)
895 986
896 987 if node is None:
897 988 raise RevlogError(_("group to be added is empty"))
898 989 return node
899 990
900 991 def strip(self, rev, minlink):
901 992 if self.count() == 0 or rev >= self.count():
902 993 return
903 994
904 995 if isinstance(self.index, lazyindex):
905 996 self.loadindexmap()
906 997
907 998 # When stripping away a revision, we need to make sure it
908 999 # does not actually belong to an older changeset.
909 1000 # The minlink parameter defines the oldest revision
910 1001 # we're allowed to strip away.
911 1002 while minlink > self.index[rev][-4]:
912 1003 rev += 1
913 1004 if rev >= self.count():
914 1005 return
915 1006
916 1007 # first truncate the files on disk
917 1008 end = self.start(rev)
1009 if not self.inlinedata():
918 1010 df = self.opener(self.datafile, "a")
919 1011 df.truncate(end)
920 1012 end = rev * struct.calcsize(self.indexformat)
1013 else:
1014 end += rev * struct.calcsize(self.indexformat)
921 1015
922 1016 indexf = self.opener(self.indexfile, "a")
923 1017 indexf.truncate(end)
924 1018
925 1019 # then reset internal state in memory to forget those revisions
926 1020 self.cache = None
927 1021 self.chunkcache = None
928 1022 for x in xrange(rev, self.count()):
929 1023 del self.nodemap[self.node(x)]
930 1024
931 1025 del self.index[rev:]
932 1026
933 1027 def checksize(self):
934 1028 expected = 0
935 1029 if self.count():
936 1030 expected = self.end(self.count() - 1)
937 1031
938 1032 try:
939 1033 f = self.opener(self.datafile)
940 1034 f.seek(0, 2)
941 1035 actual = f.tell()
942 1036 dd = actual - expected
943 1037 except IOError, inst:
944 1038 if inst.errno != errno.ENOENT:
945 1039 raise
946 1040 dd = 0
947 1041
948 1042 try:
949 1043 f = self.opener(self.indexfile)
950 1044 f.seek(0, 2)
951 1045 actual = f.tell()
952 1046 s = struct.calcsize(self.indexformat)
953 1047 i = actual / s
954 1048 di = actual - (i * s)
1049 if self.inlinedata():
1050 databytes = 0
1051 for r in xrange(self.count()):
1052 databytes += self.length(r)
1053 dd = 0
1054 di = actual - self.count() * s - databytes
955 1055 except IOError, inst:
956 1056 if inst.errno != errno.ENOENT:
957 1057 raise
958 1058 di = 0
959 1059
960 1060 return (dd, di)
961 1061
962 1062