##// END OF EJS Templates
[PATCH] raise exceptions with Exception subclasses...
Bart Trojanowski -
r1073:7b35a980 default
parent child Browse files
Show More
@@ -1,907 +1,907 b''
1 1 # hgweb.py - web interface to a mercurial repository
2 2 #
3 3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
5 5 #
6 6 # This software may be used and distributed according to the terms
7 7 # of the GNU General Public License, incorporated herein by reference.
8 8
9 9 import os, cgi, time, re, difflib, socket, sys, zlib
10 10 from mercurial.hg import *
11 11 from mercurial.ui import *
12 12
def templatepath():
    """Locate the directory holding hgweb's HTML templates.

    Tries "templates" and "../templates" relative to this module and
    returns the first existing directory, or None when neither exists.
    """
    here = os.path.dirname(__file__)
    for candidate in ("templates", "../templates"):
        full = os.path.join(here, candidate)
        if os.path.isdir(full):
            return full
18 18
def age(t):
    """Return a coarse human-readable age ("2 minutes", "10 days") for
    timestamp t (seconds since the epoch), measured against now."""
    def plural(t, c):
        if c == 1:
            return t
        return t + "s"
    def fmt(t, c):
        return "%d %s" % (c, plural(t, c))

    now = time.time()
    delta = max(1, int(now - t))

    scales = [["second", 1],
              ["minute", 60],
              ["hour", 3600],
              ["day", 3600 * 24],
              ["week", 3600 * 24 * 7],
              ["month", 3600 * 24 * 30],
              ["year", 3600 * 24 * 365]]

    # largest scale first; the "second" scale (s == 1) guarantees a return
    scales.reverse()

    for t, s in scales:
        # Use floor division: plain "/" truncates on Python 2 but yields a
        # float on Python 3, which would break the integer count.
        n = delta // s
        if n >= 2 or s == 1:
            return fmt(t, n)
44 44
def nl2br(text):
    """Insert an HTML <br/> tag before every newline in text."""
    return '<br/>\n'.join(text.split('\n'))
47 47
def obfuscate(text):
    """Encode every character of text as an HTML numeric entity (used to
    hide e-mail addresses from harvesters)."""
    parts = []
    for c in text:
        parts.append('&#%d;' % ord(c))
    return ''.join(parts)
50 50
def up(p):
    """Return the parent directory of path p in "/dir/" form (leading and
    trailing slashes); the parent of a top-level entry is "/"."""
    if p[0] != "/":
        p = "/" + p
    if p[-1] == "/":
        p = p[:-1]
    parent = os.path.dirname(p)
    if parent == "/":
        return "/"
    return parent + "/"
60 60
def httphdr(type):
    """Emit a CGI Content-type header (and the blank separator line) on
    stdout."""
    sys.stdout.write('Content-type: ' + type + '\n\n')
63 63
def write(*things):
    """Write each argument to stdout, recursively flattening iterables.

    Strings are treated as atomic: on Python 3 (unlike Python 2) str has
    __iter__, and recursing into one would loop forever on its
    single-character substrings.  Non-string, non-iterable values are
    written via str().
    """
    for thing in things:
        if hasattr(thing, "__iter__") and not isinstance(thing, str):
            for part in thing:
                write(part)
        else:
            sys.stdout.write(str(thing))
71 71
class templater:
    """Tiny template engine for hgweb.

    A map file associates template names either with inline template
    strings (name = "text") or with file names relative to the map file
    (name = other_file).  Templates are expanded by substituting
    #keyword#, #keyword%format# and #keyword|filter# markers.
    """
    def __init__(self, mapfile, filters=None, defaults=None):
        """Parse mapfile; raises LookupError on a malformed entry."""
        self.cache = {}      # template name -> template text (memoized)
        self.map = {}        # template name -> file path, loaded lazily
        self.base = os.path.dirname(mapfile)
        # avoid the shared-mutable-default-argument trap of the original
        self.filters = filters or {}
        self.defaults = defaults or {}

        # file() was removed in Python 3 (deprecated on 2); open() is
        # the exact equivalent here
        for line in open(mapfile):
            m = re.match(r'(\S+)\s*=\s*"(.*)"$', line)
            if m:
                # inline template: name = "template text"
                self.cache[m.group(1)] = m.group(2)
            else:
                m = re.match(r'(\S+)\s*=\s*(\S+)', line)
                if m:
                    # external template: name = filename (relative to map)
                    self.map[m.group(1)] = os.path.join(self.base,
                                                        m.group(2))
                else:
                    raise LookupError("unknown map entry '%s'" % line)

    def __call__(self, t, **map):
        """Expand template t with the default bindings plus **map."""
        m = self.defaults.copy()
        m.update(map)
        try:
            tmpl = self.cache[t]
        except KeyError:
            # load (and memoize) the template from its file on first use
            tmpl = self.cache[t] = open(self.map[t]).read()
        return self.template(tmpl, self.filters, **m)

    def template(self, tmpl, filters={}, **map):
        """Generator yielding the expansion of tmpl piece by piece.

        #key# substitutes map["key"] (calling it with **map when
        callable); #key%fmt# expands template 'fmt' once per item of the
        iterable value; #key|f1|f2# pipes the value through the named
        filters in order.
        """
        while tmpl:
            m = re.search(r"#([a-zA-Z0-9]+)((%[a-zA-Z0-9]+)*)((\|[a-zA-Z0-9]+)*)#", tmpl)
            if m:
                yield tmpl[:m.start(0)]
                v = map.get(m.group(1), "")
                v = callable(v) and v(**map) or v

                format = m.group(2)
                fl = m.group(4)

                if format:
                    q = v.__iter__
                    for i in q():
                        lm = map.copy()
                        lm.update(i)
                        yield self(format[1:], **lm)

                    v = ""

                elif fl:
                    for f in fl.split("|")[1:]:
                        v = filters[f](v)

                yield v
                tmpl = tmpl[m.end(0):]
            else:
                yield tmpl
                return
129 129
def rfc822date(x):
    """Format epoch timestamp x as an RFC 822 date in UTC (+0000)."""
    layout = "%a, %d %b %Y %H:%M:%S +0000"
    return time.strftime(layout, time.gmtime(x))
132 132
# Filters available to every template; each maps a raw value to its
# presentation form (see templater.template's #key|filter# syntax).
common_filters = {
    "addbreaks": nl2br,
    "age": age,
    "date": (lambda t: time.asctime(time.gmtime(t))),
    "escape": cgi.escape,
    "firstline": (lambda s: s.splitlines(1)[0]),
    "obfuscate": obfuscate,
    "permissions": (lambda m: m and "-rwxr-xr-x" or "-rw-r--r--"),
    "rfc822date": rfc822date,
    "short": (lambda s: s[:12]),
}
144 144
145 145 class hgweb:
    def __init__(self, repo, name=None):
        """Wrap a repository for web serving.

        repo may be a repository object or a path string, in which case
        the repository is opened here.  name overrides the displayed
        repository name ([web] name, defaulting to the repo root).
        """
        if type(repo) == type(""):
            self.repo = repository(ui(), repo)
        else:
            self.repo = repo

        # refresh() compares this against the changelog's on-disk mtime
        self.mtime = -1
        self.reponame = name or self.repo.ui.config("web", "name",
                                                    self.repo.root)
155 155
156 156 def refresh(self):
157 157 s = os.stat(os.path.join(self.repo.root, ".hg", "00changelog.i"))
158 158 if s.st_mtime != self.mtime:
159 159 self.mtime = s.st_mtime
160 160 self.repo = repository(self.repo.ui, self.repo.root)
161 161 self.maxchanges = self.repo.ui.config("web", "maxchanges", 10)
162 162 self.maxfiles = self.repo.ui.config("web", "maxchanges", 10)
163 163 self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
164 164
165 165 def date(self, cs):
166 166 return time.asctime(time.gmtime(float(cs[2].split(' ')[0])))
167 167
168 168 def listfiles(self, files, mf):
169 169 for f in files[:self.maxfiles]:
170 170 yield self.t("filenodelink", node=hex(mf[f]), file=f)
171 171 if len(files) > self.maxfiles:
172 172 yield self.t("fileellipses")
173 173
174 174 def listfilediffs(self, files, changeset):
175 175 for f in files[:self.maxfiles]:
176 176 yield self.t("filedifflink", node=hex(changeset), file=f)
177 177 if len(files) > self.maxfiles:
178 178 yield self.t("fileellipses")
179 179
    def parents(self, t1, nodes=[], rev=None, **args):
        """Yield template t1 once for each real (non-null) parent node.

        rev maps a node to its revision number; when omitted, revisions
        render as the empty string.  The mutable default for nodes is
        only iterated, never mutated, so the shared list is harmless.
        """
        if not rev:
            rev = lambda x: ""
        for node in nodes:
            if node != nullid:
                yield self.t(t1, node=hex(node), rev=rev(node), **args)
186 186
187 187 def showtag(self, t1, node=nullid, **args):
188 188 for t in self.repo.nodetags(node):
189 189 yield self.t(t1, tag=t, **args)
190 190
    def diff(self, node1, node2, files):
        """Yield rendered unified-diff blocks between changesets node1
        and node2, optionally restricted to the given files (exact names
        or directory prefixes)."""
        def filterfiles(list, files):
            # keep exact matches plus anything under a named directory
            l = [x for x in list if x in files]

            for f in files:
                if f[-1] != os.sep:
                    f += os.sep
                l += [x for x in list if x.startswith(f)]
            return l

        # one-element list so the closures can rebind it (no 'nonlocal')
        parity = [0]
        def diffblock(diff, f, fn):
            yield self.t("diffblock",
                         lines=prettyprintlines(diff),
                         parity=parity[0],
                         file=f,
                         filenode=hex(fn or nullid))
            parity[0] = 1 - parity[0]

        def prettyprintlines(diff):
            # dispatch each diff line to its styling template
            for l in diff.splitlines(1):
                if l.startswith('+'):
                    yield self.t("difflineplus", line=l)
                elif l.startswith('-'):
                    yield self.t("difflineminus", line=l)
                elif l.startswith('@'):
                    yield self.t("difflineat", line=l)
                else:
                    yield self.t("diffline", line=l)

        r = self.repo
        cl = r.changelog
        mf = r.manifest
        change1 = cl.read(node1)
        change2 = cl.read(node2)
        mmap1 = mf.read(change1[0])
        mmap2 = mf.read(change2[0])
        date1 = self.date(change1)
        date2 = self.date(change2)

        c, a, d, u = r.changes(node1, node2)
        if files:
            c, a, d = map(lambda x: filterfiles(x, files), (c, a, d))

        # changed, added, then deleted files
        for f in c:
            to = r.file(f).read(mmap1[f])
            tn = r.file(f).read(mmap2[f])
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
        for f in a:
            to = None
            tn = r.file(f).read(mmap2[f])
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
        for f in d:
            to = r.file(f).read(mmap1[f])
            tn = None
            yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
247 247
    def changelog(self, pos):
        """Render the changelog page centered on revision pos."""
        def changenav(**map):
            # navigation offsets: 1, 3, 10, 30, 100, ... (scaled by 10)
            def seq(factor=1):
                yield 1 * factor
                yield 3 * factor
                #yield 5 * factor
                for f in seq(factor * 10):
                    yield f

            l = []
            for f in seq():
                if f < self.maxchanges / 2:
                    continue
                if f > count:
                    break
                r = "%d" % f
                if pos + f < count:
                    l.append(("+" + r, pos + f))
                if pos - f >= 0:
                    l.insert(0, ("-" + r, pos - f))

            yield {"rev": 0, "label": "(0)"}

            for label, rev in l:
                yield {"label": label, "rev": rev}

            yield {"label": "tip", "rev": ""}

        def changelist(**map):
            parity = (start - end) & 1
            cl = self.repo.changelog
            l = [] # build a list in forward order for efficiency
            for i in range(start, end):
                n = cl.node(i)
                changes = cl.read(n)
                hn = hex(n)
                t = float(changes[2].split(' ')[0])

                # prepend so entries come out newest-first
                l.insert(0, {"parity": parity,
                             "author": changes[1],
                             "parent": self.parents("changelogparent",
                                                    cl.parents(n), cl.rev),
                             "changelogtag": self.showtag("changelogtag",n),
                             "manifest": hex(changes[0]),
                             "desc": changes[4],
                             "date": t,
                             "files": self.listfilediffs(changes[3], n),
                             "rev": i,
                             "node": hn})
                parity = 1 - parity

            for e in l:
                yield e

        cl = self.repo.changelog
        mf = cl.read(cl.tip())[0]
        count = cl.count()
        # show a window of maxchanges entries ending at pos
        start = max(0, pos - self.maxchanges + 1)
        end = min(count, start + self.maxchanges)
        pos = end - 1

        yield self.t('changelog',
                     changenav=changenav,
                     manifest=hex(mf),
                     rev=pos, changesets=count, entries=changelist)
313 313
    def search(self, query):
        """Render full-text search results: changesets whose author,
        description or changed files contain every whitespace-separated
        term of query (case-insensitive), newest first."""

        def changelist(**map):
            cl = self.repo.changelog
            count = 0
            qw = query.lower().split()

            def revgen():
                # walk the changelog newest-first, reading revisions in
                # batches of 100
                for i in range(cl.count() - 1, 0, -100):
                    l = []
                    for j in range(max(0, i - 100), i):
                        n = cl.node(j)
                        changes = cl.read(n)
                        l.append((n, j, changes))
                    l.reverse()
                    for e in l:
                        yield e

            for n, i, changes in revgen():
                miss = 0
                for q in qw:
                    # every term must match author, description or one of
                    # the first 20 file names
                    if not (q in changes[1].lower() or
                            q in changes[4].lower() or
                            q in " ".join(changes[3][:20]).lower()):
                        miss = 1
                        break
                if miss:
                    continue

                count += 1
                hn = hex(n)
                t = float(changes[2].split(' ')[0])

                yield self.t('searchentry',
                             parity=count & 1,
                             author=changes[1],
                             parent=self.parents("changelogparent",
                                                 cl.parents(n), cl.rev),
                             changelogtag=self.showtag("changelogtag",n),
                             manifest=hex(changes[0]),
                             desc=changes[4],
                             date=t,
                             files=self.listfilediffs(changes[3], n),
                             rev=i,
                             node=hn)

                if count >= self.maxchanges:
                    break

        cl = self.repo.changelog
        mf = cl.read(cl.tip())[0]

        yield self.t('search',
                     query=query,
                     manifest=hex(mf),
                     entries=changelist)
370 370
    def changeset(self, nodeid):
        """Render a single changeset page: metadata, changed files and
        the diff against its first parent."""
        n = bin(nodeid)
        cl = self.repo.changelog
        changes = cl.read(n)
        p1 = cl.parents(n)[0]
        t = float(changes[2].split(' ')[0])

        files = []
        mf = self.repo.manifest.read(changes[0])
        for f in changes[3]:
            files.append(self.t("filenodelink",
                                filenode=hex(mf.get(f, nullid)), file=f))

        def diff(**map):
            # all files, against the first parent
            yield self.diff(p1, n, None)

        yield self.t('changeset',
                     diff=diff,
                     rev=cl.rev(n),
                     node=nodeid,
                     parent=self.parents("changesetparent",
                                         cl.parents(n), cl.rev),
                     changesettag=self.showtag("changesettag",n),
                     manifest=hex(changes[0]),
                     author=changes[1],
                     desc=changes[4],
                     date=t,
                     files=files)
399 399
    def filelog(self, f, filenode):
        """Render the revision history of file f, newest first."""
        cl = self.repo.changelog
        fl = self.repo.file(f)
        count = fl.count()

        def entries(**map):
            l = []
            parity = (count - 1) & 1

            for i in range(count):
                n = fl.node(i)
                lr = fl.linkrev(n)
                cn = cl.node(lr)
                cs = cl.read(cl.node(lr))
                t = float(cs[2].split(' ')[0])

                # prepend so the final list runs newest-first
                l.insert(0, {"parity": parity,
                             "filenode": hex(n),
                             "filerev": i,
                             "file": f,
                             "node": hex(cn),
                             "author": cs[1],
                             "date": t,
                             "parent": self.parents("filelogparent",
                                                    fl.parents(n),
                                                    fl.rev, file=f),
                             "desc": cs[4]})
                parity = 1 - parity

            for e in l:
                yield e

        yield self.t("filelog", file=f, filenode=filenode, entries=entries)
433 433
    def filerevision(self, f, node):
        """Render the contents of file f at the given file node, with
        line numbers and alternating row parity."""
        fl = self.repo.file(f)
        n = bin(node)
        text = fl.read(n)
        changerev = fl.linkrev(n)
        cl = self.repo.changelog
        cn = cl.node(changerev)
        cs = cl.read(cn)
        t = float(cs[2].split(' ')[0])
        mfn = cs[0]

        def lines():
            # NB: l/t here deliberately shadow the outer names; t is the
            # line text, l the zero-based line number
            for l, t in enumerate(text.splitlines(1)):
                yield {"line": t,
                       "linenumber": "% 6d" % (l + 1),
                       "parity": l & 1}

        yield self.t("filerevision",
                     file=f,
                     filenode=node,
                     path=up(f),
                     text=lines(),
                     rev=changerev,
                     node=hex(cn),
                     manifest=hex(mfn),
                     author=cs[1],
                     date=t,
                     parent=self.parents("filerevparent",
                                         fl.parents(n), fl.rev, file=f),
                     permissions=self.repo.manifest.readflags(mfn)[f])
464 464
465 465 def fileannotate(self, f, node):
466 466 bcache = {}
467 467 ncache = {}
468 468 fl = self.repo.file(f)
469 469 n = bin(node)
470 470 changerev = fl.linkrev(n)
471 471
472 472 cl = self.repo.changelog
473 473 cn = cl.node(changerev)
474 474 cs = cl.read(cn)
475 475 t = float(cs[2].split(' ')[0])
476 476 mfn = cs[0]
477 477
478 478 def annotate(**map):
479 479 parity = 1
480 480 last = None
481 481 for r, l in fl.annotate(n):
482 482 try:
483 483 cnode = ncache[r]
484 484 except KeyError:
485 485 cnode = ncache[r] = self.repo.changelog.node(r)
486 486
487 487 try:
488 488 name = bcache[r]
489 489 except KeyError:
490 490 cl = self.repo.changelog.read(cnode)
491 491 name = cl[1]
492 492 f = name.find('@')
493 493 if f >= 0:
494 494 name = name[:f]
495 495 f = name.find('<')
496 496 if f >= 0:
497 497 name = name[f+1:]
498 498 bcache[r] = name
499 499
500 500 if last != cnode:
501 501 parity = 1 - parity
502 502 last = cnode
503 503
504 504 yield {"parity": parity,
505 505 "node": hex(cnode),
506 506 "rev": r,
507 507 "author": name,
508 508 "file": f,
509 509 "line": l}
510 510
511 511 yield self.t("fileannotate",
512 512 file=f,
513 513 filenode=node,
514 514 annotate=annotate,
515 515 path=up(f),
516 516 rev=changerev,
517 517 node=hex(cn),
518 518 manifest=hex(mfn),
519 519 author=cs[1],
520 520 date=t,
521 521 parent=self.parents("fileannotateparent",
522 522 fl.parents(n), fl.rev, file=f),
523 523 permissions=self.repo.manifest.readflags(mfn)[f])
524 524
    def manifest(self, mnode, path):
        """Render a directory listing of manifest mnode under path,
        split into subdirectory and file entries."""
        mf = self.repo.manifest.read(bin(mnode))
        rev = self.repo.manifest.rev(bin(mnode))
        node = self.repo.changelog.node(rev)
        mff=self.repo.manifest.readflags(bin(mnode))

        # short name -> (full path, file node); node None marks a directory
        files = {}

        p = path[1:]
        l = len(p)

        for f,n in mf.items():
            if f[:l] != p:
                continue
            remain = f[l:]
            if "/" in remain:
                short = remain[:remain.find("/") + 1] # bleah
                files[short] = (f, None)
            else:
                short = os.path.basename(remain)
                files[short] = (f, n)

        def filelist(**map):
            parity = 0
            fl = files.keys()
            fl.sort()
            for f in fl:
                full, fnode = files[f]
                # skip directory entries
                if not fnode:
                    continue

                yield {"file": full,
                       "manifest": mnode,
                       "filenode": hex(fnode),
                       "parity": parity,
                       "basename": f,
                       "permissions": mff[full]}
                parity = 1 - parity

        def dirlist(**map):
            parity = 0
            fl = files.keys()
            fl.sort()
            for f in fl:
                full, fnode = files[f]
                # skip file entries
                if fnode:
                    continue

                yield {"parity": parity,
                       "path": os.path.join(path, f),
                       "manifest": mnode,
                       "basename": f[:-1]}
                parity = 1 - parity

        yield self.t("manifest",
                     manifest=mnode,
                     rev=rev,
                     node=hex(node),
                     path=path,
                     up=up(path),
                     fentries=filelist,
                     dentries=dirlist)
587 587
588 588 def tags(self):
589 589 cl = self.repo.changelog
590 590 mf = cl.read(cl.tip())[0]
591 591
592 592 i = self.repo.tagslist()
593 593 i.reverse()
594 594
595 595 def entries(**map):
596 596 parity = 0
597 597 for k,n in i:
598 598 yield {"parity": parity,
599 599 "tag": k,
600 600 "node": hex(n)}
601 601 parity = 1 - parity
602 602
603 603 yield self.t("tags",
604 604 manifest=hex(mf),
605 605 entries=entries)
606 606
    def filediff(self, file, changeset):
        """Render the diff of a single file within the given changeset,
        against the changeset's first parent."""
        n = bin(changeset)
        cl = self.repo.changelog
        p1 = cl.parents(n)[0]
        cs = cl.read(n)
        mf = self.repo.manifest.read(cs[0])

        def diff(**map):
            yield self.diff(p1, n, file)

        yield self.t("filediff",
                     file=file,
                     filenode=hex(mf.get(file, nullid)),
                     node=changeset,
                     rev=self.repo.changelog.rev(n),
                     parent=self.parents("filediffparent",
                                         cl.parents(n), cl.rev),
                     diff=diff)
625 625
626 626 # add tags to things
627 627 # tags -> list of changesets corresponding to tags
628 628 # find tag, changeset, file
629 629
    def run(self):
        """CGI entry point: parse the request, pick a template map and
        dispatch on the 'cmd' query argument."""
        def header(**map):
            yield self.t("header", **map)

        def footer(**map):
            yield self.t("footer", **map)

        self.refresh()
        args = cgi.parse()

        # pick the template map, honouring an optional ?style= override
        t = self.repo.ui.config("web", "templates", templatepath())
        m = os.path.join(t, "map")
        style = self.repo.ui.config("web", "style", "")
        if args.has_key('style'):
            style = args['style'][0]
        if style:
            b = os.path.basename("map-" + style)
            p = os.path.join(t, b)
            if os.path.isfile(p):
                m = p

        # reconstruct this repository's base URL for the templates
        port = os.environ["SERVER_PORT"]
        port = port != "80" and (":" + port) or ""
        uri = os.environ["REQUEST_URI"]
        if "?" in uri:
            uri = uri.split("?")[0]
        url = "http://%s%s%s" % (os.environ["SERVER_NAME"], port, uri)

        self.t = templater(m, common_filters,
                           {"url": url,
                            "repo": self.reponame,
                            "header": header,
                            "footer": footer,
                            })

        if not args.has_key('cmd'):
            args['cmd'] = [self.t.cache['default'],]

        if args['cmd'][0] == 'changelog':
            c = self.repo.changelog.count() - 1
            hi = c
            if args.has_key('rev'):
                hi = args['rev'][0]
                try:
                    hi = self.repo.changelog.rev(self.repo.lookup(hi))
                except RepoError:
                    # not a revision: treat the argument as a search query
                    write(self.search(hi))
                    return

            write(self.changelog(hi))

        elif args['cmd'][0] == 'changeset':
            write(self.changeset(args['node'][0]))

        elif args['cmd'][0] == 'manifest':
            write(self.manifest(args['manifest'][0], args['path'][0]))

        elif args['cmd'][0] == 'tags':
            write(self.tags())

        elif args['cmd'][0] == 'filediff':
            write(self.filediff(args['file'][0], args['node'][0]))

        elif args['cmd'][0] == 'file':
            write(self.filerevision(args['file'][0], args['filenode'][0]))

        elif args['cmd'][0] == 'annotate':
            write(self.fileannotate(args['file'][0], args['filenode'][0]))

        elif args['cmd'][0] == 'filelog':
            write(self.filelog(args['file'][0], args['filenode'][0]))

        # the remaining commands implement the wire protocol for pull
        elif args['cmd'][0] == 'heads':
            httphdr("application/mercurial-0.1")
            h = self.repo.heads()
            sys.stdout.write(" ".join(map(hex, h)) + "\n")

        elif args['cmd'][0] == 'branches':
            httphdr("application/mercurial-0.1")
            nodes = []
            if args.has_key('nodes'):
                nodes = map(bin, args['nodes'][0].split(" "))
            for b in self.repo.branches(nodes):
                sys.stdout.write(" ".join(map(hex, b)) + "\n")

        elif args['cmd'][0] == 'between':
            httphdr("application/mercurial-0.1")
            nodes = []
            # NOTE(review): 'pairs' is unbound when the 'pairs' argument
            # is absent -- confirm clients always send it
            if args.has_key('pairs'):
                pairs = [map(bin, p.split("-"))
                         for p in args['pairs'][0].split(" ")]
            for b in self.repo.between(pairs):
                sys.stdout.write(" ".join(map(hex, b)) + "\n")

        elif args['cmd'][0] == 'changegroup':
            httphdr("application/mercurial-0.1")
            nodes = []
            if not self.allowpull:
                return

            if args.has_key('roots'):
                nodes = map(bin, args['roots'][0].split(" "))

            # stream the changegroup zlib-compressed in 4k chunks
            z = zlib.compressobj()
            f = self.repo.changegroup(nodes)
            while 1:
                chunk = f.read(4096)
                if not chunk:
                    break
                sys.stdout.write(z.compress(chunk))

            sys.stdout.write(z.flush())

        else:
            write(self.t("error"))
745 745
def create_server(repo):
    """Build (but do not start) a BaseHTTPServer serving repo, configured
    from the repository's [web] section."""

    def openlog(opt, default):
        # '-' (or unset) means the default stream; anything else a path
        if opt and opt != '-':
            return open(opt, 'w')
        return default

    address = repo.ui.config("web", "address", "")
    port = int(repo.ui.config("web", "port", 8000))
    use_ipv6 = repo.ui.configbool("web", "ipv6")
    accesslog = openlog(repo.ui.config("web", "accesslog", "-"), sys.stdout)
    errorlog = openlog(repo.ui.config("web", "errorlog", "-"), sys.stderr)

    import BaseHTTPServer

    class IPv6HTTPServer(BaseHTTPServer.HTTPServer):
        # None when the platform's socket module lacks IPv6 support
        address_family = getattr(socket, 'AF_INET6', None)

        def __init__(self, *args, **kwargs):
            if self.address_family is None:
                raise RepoError('IPv6 not available on this system')
            BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)

    class hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
        def log_error(self, format, *args):
            errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
                                                 self.log_date_time_string(),
                                                 format % args))

        def log_message(self, format, *args):
            accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
                                                  self.log_date_time_string(),
                                                  format % args))

        def do_POST(self):
            try:
                self.do_hgweb()
            except socket.error, inst:
                # errno 32 == EPIPE: the client went away; ignore it
                if inst.args[0] != 32:
                    raise

        def do_GET(self):
            self.do_POST()

        def do_hgweb(self):
            # split the query string off the path
            # NOTE(review): find() returns -1 when there is no '?', which
            # is truthy here and makes the whole path the query -- confirm
            # this is the intended behaviour for bare paths
            query = ""
            p = self.path.find("?")
            if p:
                query = self.path[p + 1:]
                query = query.replace('+', ' ')

            # build a minimal CGI environment for hgweb.run()
            env = {}
            env['GATEWAY_INTERFACE'] = 'CGI/1.1'
            env['REQUEST_METHOD'] = self.command
            env['SERVER_NAME'] = self.server.server_name
            env['SERVER_PORT'] = str(self.server.server_port)
            env['REQUEST_URI'] = "/"
            if query:
                env['QUERY_STRING'] = query
            host = self.address_string()
            if host != self.client_address[0]:
                env['REMOTE_HOST'] = host
            env['REMOTE_ADDR'] = self.client_address[0]

            if self.headers.typeheader is None:
                env['CONTENT_TYPE'] = self.headers.type
            else:
                env['CONTENT_TYPE'] = self.headers.typeheader
            length = self.headers.getheader('content-length')
            if length:
                env['CONTENT_LENGTH'] = length
            # fold continuation lines of the Accept header per RFC 2616
            accept = []
            for line in self.headers.getallmatchingheaders('accept'):
                if line[:1] in "\t\n\r ":
                    accept.append(line.strip())
                else:
                    accept = accept + line[7:].split(',')
            env['HTTP_ACCEPT'] = ','.join(accept)

            os.environ.update(env)

            # run hgweb with stdio redirected to the request's streams,
            # restoring the real stdio afterwards
            save = sys.argv, sys.stdin, sys.stdout, sys.stderr
            try:
                sys.stdin = self.rfile
                sys.stdout = self.wfile
                sys.argv = ["hgweb.py"]
                if '=' not in query:
                    sys.argv.append(query)
                self.send_response(200, "Script output follows")
                hg.run()
            finally:
                sys.argv, sys.stdin, sys.stdout, sys.stderr = save

    hg = hgweb(repo)
    if use_ipv6:
        return IPv6HTTPServer((address, port), hgwebhandler)
    else:
        return BaseHTTPServer.HTTPServer((address, port), hgwebhandler)
844 844
def server(path, name, templates, address, port, use_ipv6=False,
           accesslog=sys.stdout, errorlog=sys.stderr):
    """Create an hgweb HTTP server and serve requests forever.

    NOTE(review): create_server() now takes a single repo argument, but
    this wrapper still passes the old eight-argument signature, so this
    call raises TypeError as written -- confirm against callers and
    update one side or the other.
    """
    httpd = create_server(path, name, templates, address, port, use_ipv6,
                          accesslog, errorlog)
    httpd.serve_forever()
850 850
# This is a stopgap
class hgwebdir:
    """Serve an index of several repositories listed in the [paths]
    section of a config file, dispatching /name URLs to hgweb."""
    def __init__(self, config):
        # NOTE(review): ConfigParser is not imported in this module; it
        # presumably arrives via 'from mercurial.hg import *' -- confirm.
        self.cp = ConfigParser.SafeConfigParser()
        self.cp.read(config)

    def run(self):
        """CGI entry point: serve the repository named by PATH_INFO, or
        the index page when no path is given."""
        try:
            virtual = os.environ["PATH_INFO"]
        except:
            virtual = ""

        # /name -> hand the request to that repository's hgweb
        if virtual[1:]:
            real = self.cp.get("paths", virtual[1:])
            h = hgweb(real)
            h.run()
            return

        def header(**map):
            yield tmpl("header", **map)

        def footer(**map):
            yield tmpl("footer", **map)

        templates = templatepath()
        m = os.path.join(templates, "map")
        tmpl = templater(m, common_filters,
                         {"header": header, "footer": footer})

        def entries(**map):
            parity = 0
            l = self.cp.items("paths")
            l.sort()
            for v,r in l:
                # pull display metadata from each repository's own hgrc
                cp2 = ConfigParser.SafeConfigParser()
                cp2.read(os.path.join(r, ".hg", "hgrc"))

                def get(sec, val, default):
                    try:
                        return cp2.get(sec, val)
                    except:
                        return default

                url = os.environ["REQUEST_URI"] + "/" + v
                url = url.replace("//", "/")

                yield dict(author=get("web", "author", "unknown"),
                           name=get("web", "name", v),
                           url=url,
                           parity=parity,
                           shortdesc=get("web", "description", "unknown"),
                           lastupdate=os.stat(os.path.join(r, ".hg",
                                                           "00changelog.d")).st_mtime)

                parity = 1 - parity

        write(tmpl("index", entries=entries))
@@ -1,551 +1,553 b''
1 1 # revlog.py - storage back-end for mercurial
2 2 #
3 3 # This provides efficient delta storage with O(1) retrieve and append
4 4 # and O(changes) merge between branches
5 5 #
6 6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 7 #
8 8 # This software may be used and distributed according to the terms
9 9 # of the GNU General Public License, incorporated herein by reference.
10 10
11 11 import zlib, struct, sha, binascii, heapq
12 12 from mercurial import mdiff
13 13
def hex(node):
    """Return the hexadecimal representation of a binary node id."""
    return binascii.hexlify(node)
def bin(node):
    """Inverse of hex(): decode a hex string back to a binary node id."""
    return binascii.unhexlify(node)
def short(node):
    """Return the abbreviated node id: first 6 bytes as 12 hex digits."""
    return hex(node[:6])
17 17
def compress(text):
    """Compress text with zlib, falling back to literal storage.

    Empty text is returned as-is.  Short (< 44 bytes) or incompressible
    text is stored literally: NUL-leading text is self-identifying,
    anything else gets a 'u' (uncompressed) marker byte so that
    decompress() can distinguish it from a zlib stream.
    """
    if not text:
        return text

    def literal(t):
        # NUL-leading text needs no marker; otherwise prefix 'u'.
        if t[0] == '\0':
            return t
        return 'u' + t

    if len(text) < 44:
        # Too short for zlib to be worthwhile.
        return literal(text)
    comp = zlib.compress(text)
    if len(comp) > len(text):
        # Compression made it bigger; keep the literal form.
        return literal(text)
    return comp
28 28
def decompress(bin):
    """Reverse compress(): dispatch on the one-byte storage marker."""
    if not bin:
        return bin
    marker = bin[0]
    if marker == '\0':
        # Stored literally (NUL-leading text).
        return bin
    if marker == 'x':
        # 'x' is the leading byte of a zlib stream.
        return zlib.decompress(bin)
    if marker == 'u':
        # Explicitly marked uncompressed text.
        return bin[1:]
    raise RevlogError("unknown compression type %s" % marker)
36 36
def hash(text, p1, p2):
    """Return the SHA-1 digest of the two parent ids (in sorted order)
    followed by the text.  Sorting makes the hash parent-order
    independent."""
    if p2 < p1:
        p1, p2 = p2, p1
    s = sha.new(p1)
    s.update(p2)
    s.update(text)
    return s.digest()
44 44
nullid = "\0" * 20  # node id of the null (nonexistent) revision
indexformat = ">4l20s20s20s"  # offset, size, base, linkrev, p1, p2, nodeid
47 47
class lazyparser:
    """Parse a revlog index lazily, in 1000-entry blocks on demand."""
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data) / self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        """Parse the block of entries containing pos, or everything
        when pos is None."""
        if self.all:
            return
        if pos is None:
            # Full parse: hand the plain list/dict to the revlog so it
            # can bypass the lazy wrappers from here on.
            self.all = 1
            start = 0
            end = self.l
            self.revlog.index = self.index
            self.revlog.nodemap = self.map
        else:
            block = pos / 1000
            start = block * 1000
            end = min(self.l, start + 1000)
        for i in xrange(start, end):
            raw = self.data[i * self.s: (i + 1) * self.s]
            entry = struct.unpack(indexformat, raw)
            self.index[i] = entry
            self.map[entry[6]] = i
77 77
class lazyindex:
    """List-like view over a lazyparser's index, faulting entries in
    on first access."""
    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        self.p.load(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        entry = self.p.index[pos]
        if entry:
            return entry
        # None means the entry has not been parsed yet: fault it in.
        return self.load(pos)

    def append(self, e):
        self.p.index.append(e)
90 90
class lazymap:
    """Dict-like node -> rev mapping backed by a lazyparser."""
    def __init__(self, parser):
        self.p = parser

    def load(self, key):
        """Find key in the raw index data and parse its block."""
        if self.p.all:
            return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError("node " + hex(key))
        self.p.load(n / self.p.s)

    def __contains__(self, key):
        # Membership needs the full map, so force a complete parse.
        self.p.load()
        return key in self.p.map

    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                # Entry not parsed yet; fault it in and retry.
                self.p.load(i)
                yield self.p.index[i][6]

    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))

    def __setitem__(self, key, val):
        self.p.map[key] = val
122 122
class RevlogError(Exception):
    """Raised for revlog corruption or consistency errors."""
    pass
124
class revlog:
    """Append-only storage of revisioned data as compressed delta chains.

    Each revision is stored either as a full (compressed) snapshot or as
    a delta against the previous revision.  Index entries are tuples of
    (offset, size, base, linkrev, p1, p2, nodeid), see `indexformat`.
    """
    def __init__(self, opener, indexfile, datafile):
        # opener: callable(name[, mode]) -> file object, supplied by the
        # repository layer so this class stays storage-agnostic.
        self.indexfile = indexfile
        self.datafile = datafile
        self.opener = opener
        self.cache = None  # (node, rev, text) of the last revision() call

        try:
            i = self.opener(self.indexfile).read()
        except IOError:
            # A missing index file means an empty (new) revlog.
            i = ""

        if len(i) > 10000:
            # big index, let's parse it on demand
            parser = lazyparser(i, self)
            self.index = lazyindex(parser)
            self.nodemap = lazymap(parser)
        else:
            s = struct.calcsize(indexformat)
            l = len(i) / s
            self.index = [None] * l
            m = [None] * l

            n = 0
            for f in xrange(0, len(i), s):
                # offset, size, base, linkrev, p1, p2, nodeid
                e = struct.unpack(indexformat, i[f:f + s])
                m[n] = (e[6], n)
                self.index[n] = e
                n += 1

            self.nodemap = dict(m)
            self.nodemap[nullid] = -1

    # Small accessors over the index tuples.
    def tip(self): return self.node(len(self.index) - 1)
    def count(self): return len(self.index)
    def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
    def rev(self, node): return self.nodemap[node]
    def linkrev(self, node): return self.index[self.nodemap[node]][3]
    def parents(self, node):
        # The null revision is its own (null) parent.
        if node == nullid: return (nullid, nullid)
        return self.index[self.nodemap[node]][4:6]

    def start(self, rev): return self.index[rev][0]
    def length(self, rev): return self.index[rev][1]
    def end(self, rev): return self.start(rev) + self.length(rev)
    def base(self, rev): return self.index[rev][2]

    def heads(self, stop=None):
        """Return the nodes with no children, newest (highest rev) first.

        If stop is given and known, do not walk back past its revision.
        """
        p = {}  # set of nodes seen as a parent so far
        h = []
        stoprev = 0
        if stop and stop in self.nodemap:
            stoprev = self.rev(stop)

        for r in range(self.count() - 1, -1, -1):
            n = self.node(r)
            if n not in p:
                h.append(n)
            if n == stop:
                break
            if r < stoprev:
                break
            for pn in self.parents(n):
                p[pn] = 1
        return h

    def children(self, node):
        """Return the nodes (after node's revision) listing node as a
        parent."""
        c = []
        p = self.rev(node)
        for r in range(p + 1, self.count()):
            n = self.node(r)
            for pn in self.parents(n):
                if pn == node:
                    c.append(n)
                    continue
                elif pn == nullid:
                    continue
            # NOTE(review): this return sits inside the outer loop, so
            # only rev p+1 is ever examined -- looks like it should be
            # dedented one level to scan all later revisions; confirm.
            return c

    def lookup(self, id):
        """Resolve id -- a revision number (possibly negative) or a hex
        node-id prefix -- to a binary node id.

        Raises KeyError when a prefix is ambiguous or matches nothing.
        """
        try:
            rev = int(id)
            if str(rev) != id: raise ValueError
            if rev < 0: rev = self.count() + rev
            if rev < 0 or rev >= self.count(): raise ValueError
            return self.node(rev)
        except (ValueError, OverflowError):
            # Not a revision number: try as a hex node prefix.
            c = []
            for n in self.nodemap:
                if hex(n).startswith(id):
                    c.append(n)
            if len(c) > 1: raise KeyError("Ambiguous identifier")
            if len(c) < 1: raise KeyError("No match found")
            return c[0]

        return None  # NOTE(review): unreachable -- both paths return or raise

    def diff(self, a, b):
        """Return the binary delta transforming text a into text b."""
        return mdiff.textdiff(a, b)

    def patches(self, t, pl):
        """Apply the list of patches pl to base text t."""
        return mdiff.patches(t, pl)

    def delta(self, node):
        """Return the delta for node against the preceding revision."""
        r = self.rev(node)
        b = self.base(r)
        if r == b:
            # node is stored as a full snapshot: compute the delta.
            return self.diff(self.revision(self.node(r - 1)),
                             self.revision(node))
        else:
            # The on-disk chunk for r already is the wanted delta.
            f = self.opener(self.datafile)
            f.seek(self.start(r))
            data = f.read(self.length(r))
            return decompress(data)

    def revision(self, node):
        """Reconstruct and return the full text of node by applying its
        delta chain, verifying the result against the node hash."""
        if node == nullid: return ""
        if self.cache and self.cache[0] == node: return self.cache[2]

        text = None
        rev = self.rev(node)
        start, length, base, link, p1, p2, node = self.index[rev]
        end = start + length
        if base != rev: start = self.start(base)

        # Reuse the cached text as the chain base when it lies on the
        # same delta chain, so only the chain tail must be read/applied.
        if self.cache and self.cache[1] >= base and self.cache[1] < rev:
            base = self.cache[1]
            start = self.start(base + 1)
            text = self.cache[2]
            last = 0

        f = self.opener(self.datafile)
        f.seek(start)
        data = f.read(end - start)

        if text is None:
            last = self.length(base)
            text = decompress(data[:last])

        bins = []
        for r in xrange(base + 1, rev + 1):
            s = self.length(r)
            bins.append(decompress(data[last:last + s]))
            last = last + s

        text = mdiff.patches(text, bins)

        # Integrity: the reconstructed text must hash back to the node.
        if node != hash(text, p1, p2):
            raise IOError("integrity check failed on %s:%d"
                          % (self.datafile, rev))

        self.cache = (node, rev, text)
        return text

    def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
        """Add a revision and return its node id (existing or new).

        link is the changelog revision this change belongs to; d, when
        given, is a precomputed delta against the current tip.
        """
        if text is None: text = ""
        if p1 is None: p1 = self.tip()
        if p2 is None: p2 = nullid

        node = hash(text, p1, p2)

        # Identical (text, parents) already stored: nothing to do.
        if node in self.nodemap:
            return node

        n = self.count()
        t = n - 1

        if n:
            base = self.base(t)
            start = self.start(base)
            end = self.end(t)
            if not d:
                prev = self.revision(self.tip())
                d = self.diff(prev, text)
            data = compress(d)
            dist = end - start + len(data)

        # full versions are inserted when the needed deltas
        # become comparable to the uncompressed text
        if not n or dist > len(text) * 2:
            data = compress(text)
            base = n
        else:
            base = self.base(t)

        offset = 0
        if t >= 0:
            offset = self.end(t)

        e = (offset, len(data), base, link, p1, p2, node)

        self.index.append(e)
        self.nodemap[node] = n
        entry = struct.pack(indexformat, *e)

        # Journal the current file sizes, then append data and index.
        transaction.add(self.datafile, e[0])
        self.opener(self.datafile, "a").write(data)
        transaction.add(self.indexfile, n * len(entry))
        self.opener(self.indexfile, "a").write(entry)

        self.cache = (node, n, text)
        return node

    def ancestor(self, a, b):
        """Return the closest common ancestor node of nodes a and b."""
        # calculate the distance of every node from root
        dist = {nullid: 0}
        for i in xrange(self.count()):
            n = self.node(i)
            p1, p2 = self.parents(n)
            dist[n] = max(dist[p1], dist[p2]) + 1

        # traverse ancestors in order of decreasing distance from root
        def ancestors(node):
            # we store negative distances because heap returns smallest member
            h = [(-dist[node], node)]
            seen = {}
            earliest = self.count()  # NOTE(review): unused local
            while h:
                d, n = heapq.heappop(h)
                if n not in seen:
                    seen[n] = 1
                    r = self.rev(n)
                    yield (-d, r, n)
                    for p in self.parents(n):
                        heapq.heappush(h, (-dist[p], p))

        x = ancestors(a)
        y = ancestors(b)
        lx = x.next()
        ly = y.next()

        # increment each ancestor list until it is closer to root than
        # the other, or they match
        while 1:
            if lx == ly:
                return lx[2]
            elif lx < ly:
                ly = y.next()
            elif lx > ly:
                lx = x.next()

    def group(self, linkmap):
        """Yield a changegroup for revisions whose linkrev is in linkmap:
        a stream of length-prefixed (meta + delta) chunks ending with a
        zero-length marker.  The 4-byte length prefix counts itself."""
        # given a list of changeset revs, return a set of deltas and
        # metadata corresponding to nodes. the first delta is
        # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
        # have this parent as it has all history before these
        # changesets. parent is parent[0]

        revs = []
        needed = {}

        # find file nodes/revs that match changeset revs
        for i in xrange(0, self.count()):
            if self.index[i][3] in linkmap:
                revs.append(i)
                needed[i] = 1

        # if we don't have any revisions touched by these changesets, bail
        if not revs:
            yield struct.pack(">l", 0)
            return

        # add the parent of the first rev
        p = self.parents(self.node(revs[0]))[0]
        revs.insert(0, self.rev(p))

        # for each delta that isn't contiguous in the log, we need to
        # reconstruct the base, reconstruct the result, and then
        # calculate the delta. We also need to do this where we've
        # stored a full version and not a delta
        for i in xrange(0, len(revs) - 1):
            a, b = revs[i], revs[i + 1]
            if a + 1 != b or self.base(b) == b:
                for j in xrange(self.base(a), a + 1):
                    needed[j] = 1
                for j in xrange(self.base(b), b + 1):
                    needed[j] = 1

        # calculate spans to retrieve from datafile
        needed = needed.keys()
        needed.sort()
        spans = []
        oo = -1
        ol = 0
        for n in needed:
            if n < 0: continue
            o = self.start(n)
            l = self.length(n)
            if oo + ol == o: # can we merge with the previous?
                nl = spans[-1][2]
                nl.append((n, l))
                ol += l
                spans[-1] = (oo, ol, nl)
            else:
                oo = o
                ol = l
                spans.append((oo, ol, [(n, l)]))

        # read spans in, divide up chunks
        chunks = {}
        for span in spans:
            # we reopen the file for each span to make http happy for now
            f = self.opener(self.datafile)
            f.seek(span[0])
            data = f.read(span[1])

            # divide up the span
            pos = 0
            for r, l in span[2]:
                chunks[r] = decompress(data[pos: pos + l])
                pos += l

        # helper to reconstruct intermediate versions
        def construct(text, base, rev):
            bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
            return mdiff.patches(text, bins)

        # build deltas
        deltas = []  # NOTE(review): unused local
        for d in xrange(0, len(revs) - 1):
            a, b = revs[d], revs[d + 1]
            n = self.node(b)

            # do we need to construct a new delta?
            if a + 1 != b or self.base(b) == b:
                if a >= 0:
                    base = self.base(a)
                    ta = chunks[self.base(a)]
                    ta = construct(ta, base, a)
                else:
                    ta = ""

                base = self.base(b)
                if a > base:
                    base = a
                    tb = ta
                else:
                    tb = chunks[self.base(b)]
                    tb = construct(tb, base, b)
                d = self.diff(ta, tb)
            else:
                d = chunks[b]

            p = self.parents(n)
            meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
            l = struct.pack(">l", len(meta) + len(d) + 4)
            yield l
            yield meta
            yield d

        yield struct.pack(">l", 0)

    def addgroup(self, revs, linkmapper, transaction, unique=0):
        """Consume a changegroup chunk stream and append its revisions.

        linkmapper maps a changeset id to a link revision; with unique
        set, a chunk for an already-known node raises RevlogError.
        Returns the node of the last revision added.
        """
        # given a set of deltas, add them to the revision log. the
        # first delta is against its parent, which should be in our
        # log, the rest are against the previous delta.

        # track the base of the current delta log
        r = self.count()
        t = r - 1
        node = nullid

        base = prev = -1
        start = end = measure = 0
        if r:
            start = self.start(self.base(t))
            end = self.end(t)
            measure = self.length(self.base(t))
            base = self.base(t)
            prev = self.tip()

        transaction.add(self.datafile, end)
        transaction.add(self.indexfile, r * struct.calcsize(indexformat))
        dfh = self.opener(self.datafile, "a")
        ifh = self.opener(self.indexfile, "a")

        # loop through our set of deltas
        chain = None
        for chunk in revs:
            node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
            link = linkmapper(cs)
            if node in self.nodemap:
                # this can happen if two branches make the same change
                if unique:
                    raise RevlogError("already have %s" % hex(node[:4]))
                chain = node
                continue
            delta = chunk[80:]

            if not chain:
                # retrieve the parent revision of the delta chain
                chain = p1
                if not chain in self.nodemap:
                    raise RevlogError("unknown base %s" % short(chain[:4]))

            # full versions are inserted when the needed deltas become
            # comparable to the uncompressed text or when the previous
            # version is not the one we have a delta against. We use
            # the size of the previous full rev as a proxy for the
            # current size.

            if chain == prev:
                cdelta = compress(delta)

            if chain != prev or (end - start + len(cdelta)) > measure * 2:
                # flush our writes here so we can read it in revision
                dfh.flush()
                ifh.flush()
                text = self.revision(chain)
                text = self.patches(text, [delta])
                chk = self.addrevision(text, transaction, link, p1, p2)
                if chk != node:
                    raise RevlogError("consistency error adding group")
                measure = len(text)
            else:
                # Contiguous delta: append it directly to the chain.
                e = (end, len(cdelta), self.base(t), link, p1, p2, node)
                self.index.append(e)
                self.nodemap[node] = r
                dfh.write(cdelta)
                ifh.write(struct.pack(indexformat, *e))

            t, r, chain, prev = r, r + 1, node, node
            start = self.start(self.base(t))
            end = self.end(t)

        dfh.close()
        ifh.close()
        return node
@@ -1,78 +1,78 b''
1 1 # transaction.py - simple journalling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms
12 12 # of the GNU General Public License, incorporated herein by reference.
13 13
14 14 import os
15 15 import util
16 16
class transaction:
    """Journalled transaction over append-only files.

    Each file touched is recorded with its pre-transaction size; an
    abort truncates every file back to that size.  The journal file on
    disk allows recovery after a crash (see rollback()).
    """
    def __init__(self, report, opener, journal, after=None):
        # Set early so __del__ is safe even if we raise below.
        self.journal = None

        # abort here if the journal already exists
        if os.path.exists(journal):
            raise AssertionError("journal already exists - run hg recover")

        self.report = report    # callable used to emit status messages
        self.opener = opener    # callable(name, mode) -> file object
        self.after = after      # optional callback run on close instead of unlink
        self.entries = []       # (file, offset) pairs to truncate on abort
        self.map = {}           # files already journalled
        self.journal = journal

        self.file = open(self.journal, "w")

    def __del__(self):
        # Roll back automatically if the transaction was never closed.
        if self.journal:
            if self.entries: self.abort()
            self.file.close()
            try: os.unlink(self.journal)
            except: pass

    def add(self, file, offset):
        """Record that `file` may grow past `offset` in this transaction."""
        if file in self.map: return
        self.entries.append((file, offset))
        self.map[file] = 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    def close(self):
        """Commit: discard the journal (or run the `after` callback)."""
        self.file.close()
        self.entries = []
        if self.after:
            self.after()
        else:
            os.unlink(self.journal)
        self.journal = None

    def abort(self):
        """Undo all journalled writes by truncating files to their
        recorded pre-transaction sizes."""
        if not self.entries: return

        self.report("transaction abort!\n")

        for f, o in self.entries:
            try:
                self.opener(f, "a").truncate(o)
            except:
                # Best effort: report and keep truncating the rest.
                self.report("failed to truncate %s\n" % f)

        self.entries = []

        self.report("rollback completed\n")
72 72
def rollback(opener, file):
    """Replay a journal after a crash: truncate each listed file back to
    its recorded offset, then remove the journal file itself."""
    for line in open(file).readlines():
        name, offset = line.split('\0')
        opener(name, "a").truncate(int(offset))
    os.unlink(file)
78 78
General Comments 0
You need to be logged in to leave comments. Login now