##// END OF EJS Templates
[PATCH] raise exceptions with Exception subclasses...
Bart Trojanowski -
r1073:7b35a980 default
parent child Browse files
Show More
@@ -1,907 +1,907 b''
1 # hgweb.py - web interface to a mercurial repository
1 # hgweb.py - web interface to a mercurial repository
2 #
2 #
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
3 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005 Matt Mackall <mpm@selenic.com>
5 #
5 #
6 # This software may be used and distributed according to the terms
6 # This software may be used and distributed according to the terms
7 # of the GNU General Public License, incorporated herein by reference.
7 # of the GNU General Public License, incorporated herein by reference.
8
8
9 import os, cgi, time, re, difflib, socket, sys, zlib
9 import os, cgi, time, re, difflib, socket, sys, zlib
10 from mercurial.hg import *
10 from mercurial.hg import *
11 from mercurial.ui import *
11 from mercurial.ui import *
12
12
def templatepath():
    """Return the first existing template directory next to this module.

    Looks for "templates" and "../templates" relative to this file;
    returns None when neither exists.
    """
    base = os.path.dirname(__file__)
    for candidate in ("templates", "../templates"):
        path = os.path.join(base, candidate)
        if os.path.isdir(path):
            return path
18
18
def age(t):
    """Return an approximate, human-readable age for epoch timestamp t.

    Picks the largest unit for which at least two full units have elapsed
    (falling back to seconds), e.g. "5 minutes", "3 years".
    """
    def plural(unit, count):
        # singular form only for exactly one unit
        if count == 1:
            return unit
        return unit + "s"

    def fmt(unit, count):
        return "%d %s" % (count, plural(unit, count))

    now = time.time()
    # clamp to at least one second so the seconds scale always matches
    delta = max(1, int(now - t))

    # largest unit first (original built the list small-to-large and reversed)
    scales = [("year", 3600 * 24 * 365),
              ("month", 3600 * 24 * 30),
              ("week", 3600 * 24 * 7),
              ("day", 3600 * 24),
              ("hour", 3600),
              ("minute", 60),
              ("second", 1)]

    for unit, secs in scales:
        # use floor division: the original Python 2 "/" truncated; under
        # Python 3 true division would feed floats into plural()'s == 1 test
        n = delta // secs
        if n >= 2 or secs == 1:
            return fmt(unit, n)
44
44
def nl2br(text):
    """Insert an HTML <br/> before every newline (newline is kept)."""
    return '<br/>\n'.join(text.split('\n'))
47
47
def obfuscate(text):
    """Encode every character as a decimal HTML entity (anti-scraper)."""
    parts = []
    for ch in text:
        parts.append('&#%d;' % ord(ch))
    return ''.join(parts)
50
50
def up(p):
    """Return the parent directory of URL path p, slash-wrapped.

    A leading slash is added and a trailing slash stripped before taking
    the dirname; the result always ends with "/".
    """
    if p[0] != "/":
        p = "/" + p
    if p[-1] == "/":
        p = p[:-1]
    parent = os.path.dirname(p)
    return "/" if parent == "/" else parent + "/"
60
60
def httphdr(type):
    """Write a CGI Content-type header (plus blank separator) to stdout."""
    header = 'Content-type: %s\n\n' % type
    sys.stdout.write(header)
63
63
def write(*things):
    """Write each argument to stdout, recursing into iterables.

    Non-iterable leaves are stringified with str(). (Under Python 2,
    strings have no __iter__ and so count as leaves here.)
    """
    for thing in things:
        if not hasattr(thing, "__iter__"):
            sys.stdout.write(str(thing))
        else:
            for part in thing:
                write(part)
71
71
class templater:
    """Tiny "#key%format|filter#" template engine driven by a map file.

    The map file contains lines of either form:
        name = "inline template text"
        name = filename-relative-to-mapfile
    Quoted entries are cached immediately; file entries are loaded lazily
    on first use.  A line matching neither form raises LookupError.
    """
    def __init__(self, mapfile, filters=None, defaults=None):
        # None sentinels instead of mutable {} defaults shared across calls
        self.filters = filters if filters is not None else {}
        self.defaults = defaults if defaults is not None else {}
        self.cache = {}
        self.map = {}
        self.base = os.path.dirname(mapfile)

        # open() instead of the Python-2-only file() builtin; close the
        # handle explicitly instead of leaking it
        fp = open(mapfile)
        try:
            for l in fp:
                m = re.match(r'(\S+)\s*=\s*"(.*)"$', l)
                if m:
                    # quoted value: literal template text
                    self.cache[m.group(1)] = m.group(2)
                else:
                    m = re.match(r'(\S+)\s*=\s*(\S+)', l)
                    if m:
                        # bare value: path of a template file
                        self.map[m.group(1)] = os.path.join(self.base,
                                                            m.group(2))
                    else:
                        raise LookupError("unknown map entry '%s'" % l)
        finally:
            fp.close()

    def __call__(self, t, **map):
        """Expand template t using defaults overridden by keyword args."""
        m = self.defaults.copy()
        m.update(map)
        try:
            tmpl = self.cache[t]
        except KeyError:
            # lazily read and cache a file-backed template
            fp = open(self.map[t])
            try:
                tmpl = self.cache[t] = fp.read()
            finally:
                fp.close()
        return self.template(tmpl, self.filters, **m)

    def template(self, tmpl, filters=None, **map):
        """Generator yielding the expanded pieces of tmpl.

        "#key#" substitutes map["key"] (calling it if callable);
        "#key%sub#" expands sub-template "sub" for each item of an
        iterable value; "#key|f1|f2#" pipes the value through filters.
        """
        if filters is None:
            filters = {}
        while tmpl:
            m = re.search(r"#([a-zA-Z0-9]+)((%[a-zA-Z0-9]+)*)"
                          r"((\|[a-zA-Z0-9]+)*)#", tmpl)
            if not m:
                yield tmpl
                return
            yield tmpl[:m.start(0)]
            v = map.get(m.group(1), "")
            v = callable(v) and v(**map) or v

            format = m.group(2)
            fl = m.group(4)

            if format:
                # expand the sub-template once per item of the iterable
                q = v.__iter__
                for i in q():
                    lm = map.copy()
                    lm.update(i)
                    yield self(format[1:], **lm)
                v = ""
            elif fl:
                # apply each "|filter" in order
                for f in fl.split("|")[1:]:
                    v = filters[f](v)

            yield v
            tmpl = tmpl[m.end(0):]
129
129
def rfc822date(x):
    """Format epoch time x as an RFC 822 date string in UTC."""
    gm = time.gmtime(x)
    return time.strftime("%a, %d %b %Y %H:%M:%S +0000", gm)
132
132
# Filters available to templates through "#value|name#" expressions.
common_filters = dict(
    escape=cgi.escape,
    age=age,
    date=lambda x: time.asctime(time.gmtime(x)),
    addbreaks=nl2br,
    obfuscate=obfuscate,
    short=lambda x: x[:12],
    firstline=lambda x: x.splitlines(1)[0],
    permissions=lambda x: x and "-rwxr-xr-x" or "-rw-r--r--",
    rfc822date=rfc822date,
    )
144
144
145 class hgweb:
145 class hgweb:
146 def __init__(self, repo, name=None):
146 def __init__(self, repo, name=None):
147 if type(repo) == type(""):
147 if type(repo) == type(""):
148 self.repo = repository(ui(), repo)
148 self.repo = repository(ui(), repo)
149 else:
149 else:
150 self.repo = repo
150 self.repo = repo
151
151
152 self.mtime = -1
152 self.mtime = -1
153 self.reponame = name or self.repo.ui.config("web", "name",
153 self.reponame = name or self.repo.ui.config("web", "name",
154 self.repo.root)
154 self.repo.root)
155
155
156 def refresh(self):
156 def refresh(self):
157 s = os.stat(os.path.join(self.repo.root, ".hg", "00changelog.i"))
157 s = os.stat(os.path.join(self.repo.root, ".hg", "00changelog.i"))
158 if s.st_mtime != self.mtime:
158 if s.st_mtime != self.mtime:
159 self.mtime = s.st_mtime
159 self.mtime = s.st_mtime
160 self.repo = repository(self.repo.ui, self.repo.root)
160 self.repo = repository(self.repo.ui, self.repo.root)
161 self.maxchanges = self.repo.ui.config("web", "maxchanges", 10)
161 self.maxchanges = self.repo.ui.config("web", "maxchanges", 10)
162 self.maxfiles = self.repo.ui.config("web", "maxchanges", 10)
162 self.maxfiles = self.repo.ui.config("web", "maxchanges", 10)
163 self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
163 self.allowpull = self.repo.ui.configbool("web", "allowpull", True)
164
164
165 def date(self, cs):
165 def date(self, cs):
166 return time.asctime(time.gmtime(float(cs[2].split(' ')[0])))
166 return time.asctime(time.gmtime(float(cs[2].split(' ')[0])))
167
167
168 def listfiles(self, files, mf):
168 def listfiles(self, files, mf):
169 for f in files[:self.maxfiles]:
169 for f in files[:self.maxfiles]:
170 yield self.t("filenodelink", node=hex(mf[f]), file=f)
170 yield self.t("filenodelink", node=hex(mf[f]), file=f)
171 if len(files) > self.maxfiles:
171 if len(files) > self.maxfiles:
172 yield self.t("fileellipses")
172 yield self.t("fileellipses")
173
173
174 def listfilediffs(self, files, changeset):
174 def listfilediffs(self, files, changeset):
175 for f in files[:self.maxfiles]:
175 for f in files[:self.maxfiles]:
176 yield self.t("filedifflink", node=hex(changeset), file=f)
176 yield self.t("filedifflink", node=hex(changeset), file=f)
177 if len(files) > self.maxfiles:
177 if len(files) > self.maxfiles:
178 yield self.t("fileellipses")
178 yield self.t("fileellipses")
179
179
180 def parents(self, t1, nodes=[], rev=None,**args):
180 def parents(self, t1, nodes=[], rev=None,**args):
181 if not rev:
181 if not rev:
182 rev = lambda x: ""
182 rev = lambda x: ""
183 for node in nodes:
183 for node in nodes:
184 if node != nullid:
184 if node != nullid:
185 yield self.t(t1, node=hex(node), rev=rev(node), **args)
185 yield self.t(t1, node=hex(node), rev=rev(node), **args)
186
186
187 def showtag(self, t1, node=nullid, **args):
187 def showtag(self, t1, node=nullid, **args):
188 for t in self.repo.nodetags(node):
188 for t in self.repo.nodetags(node):
189 yield self.t(t1, tag=t, **args)
189 yield self.t(t1, tag=t, **args)
190
190
191 def diff(self, node1, node2, files):
191 def diff(self, node1, node2, files):
192 def filterfiles(list, files):
192 def filterfiles(list, files):
193 l = [x for x in list if x in files]
193 l = [x for x in list if x in files]
194
194
195 for f in files:
195 for f in files:
196 if f[-1] != os.sep:
196 if f[-1] != os.sep:
197 f += os.sep
197 f += os.sep
198 l += [x for x in list if x.startswith(f)]
198 l += [x for x in list if x.startswith(f)]
199 return l
199 return l
200
200
201 parity = [0]
201 parity = [0]
202 def diffblock(diff, f, fn):
202 def diffblock(diff, f, fn):
203 yield self.t("diffblock",
203 yield self.t("diffblock",
204 lines=prettyprintlines(diff),
204 lines=prettyprintlines(diff),
205 parity=parity[0],
205 parity=parity[0],
206 file=f,
206 file=f,
207 filenode=hex(fn or nullid))
207 filenode=hex(fn or nullid))
208 parity[0] = 1 - parity[0]
208 parity[0] = 1 - parity[0]
209
209
210 def prettyprintlines(diff):
210 def prettyprintlines(diff):
211 for l in diff.splitlines(1):
211 for l in diff.splitlines(1):
212 if l.startswith('+'):
212 if l.startswith('+'):
213 yield self.t("difflineplus", line=l)
213 yield self.t("difflineplus", line=l)
214 elif l.startswith('-'):
214 elif l.startswith('-'):
215 yield self.t("difflineminus", line=l)
215 yield self.t("difflineminus", line=l)
216 elif l.startswith('@'):
216 elif l.startswith('@'):
217 yield self.t("difflineat", line=l)
217 yield self.t("difflineat", line=l)
218 else:
218 else:
219 yield self.t("diffline", line=l)
219 yield self.t("diffline", line=l)
220
220
221 r = self.repo
221 r = self.repo
222 cl = r.changelog
222 cl = r.changelog
223 mf = r.manifest
223 mf = r.manifest
224 change1 = cl.read(node1)
224 change1 = cl.read(node1)
225 change2 = cl.read(node2)
225 change2 = cl.read(node2)
226 mmap1 = mf.read(change1[0])
226 mmap1 = mf.read(change1[0])
227 mmap2 = mf.read(change2[0])
227 mmap2 = mf.read(change2[0])
228 date1 = self.date(change1)
228 date1 = self.date(change1)
229 date2 = self.date(change2)
229 date2 = self.date(change2)
230
230
231 c, a, d, u = r.changes(node1, node2)
231 c, a, d, u = r.changes(node1, node2)
232 if files:
232 if files:
233 c, a, d = map(lambda x: filterfiles(x, files), (c, a, d))
233 c, a, d = map(lambda x: filterfiles(x, files), (c, a, d))
234
234
235 for f in c:
235 for f in c:
236 to = r.file(f).read(mmap1[f])
236 to = r.file(f).read(mmap1[f])
237 tn = r.file(f).read(mmap2[f])
237 tn = r.file(f).read(mmap2[f])
238 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
238 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
239 for f in a:
239 for f in a:
240 to = None
240 to = None
241 tn = r.file(f).read(mmap2[f])
241 tn = r.file(f).read(mmap2[f])
242 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
242 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
243 for f in d:
243 for f in d:
244 to = r.file(f).read(mmap1[f])
244 to = r.file(f).read(mmap1[f])
245 tn = None
245 tn = None
246 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
246 yield diffblock(mdiff.unidiff(to, date1, tn, date2, f), f, tn)
247
247
248 def changelog(self, pos):
248 def changelog(self, pos):
249 def changenav(**map):
249 def changenav(**map):
250 def seq(factor=1):
250 def seq(factor=1):
251 yield 1 * factor
251 yield 1 * factor
252 yield 3 * factor
252 yield 3 * factor
253 #yield 5 * factor
253 #yield 5 * factor
254 for f in seq(factor * 10):
254 for f in seq(factor * 10):
255 yield f
255 yield f
256
256
257 l = []
257 l = []
258 for f in seq():
258 for f in seq():
259 if f < self.maxchanges / 2:
259 if f < self.maxchanges / 2:
260 continue
260 continue
261 if f > count:
261 if f > count:
262 break
262 break
263 r = "%d" % f
263 r = "%d" % f
264 if pos + f < count:
264 if pos + f < count:
265 l.append(("+" + r, pos + f))
265 l.append(("+" + r, pos + f))
266 if pos - f >= 0:
266 if pos - f >= 0:
267 l.insert(0, ("-" + r, pos - f))
267 l.insert(0, ("-" + r, pos - f))
268
268
269 yield {"rev": 0, "label": "(0)"}
269 yield {"rev": 0, "label": "(0)"}
270
270
271 for label, rev in l:
271 for label, rev in l:
272 yield {"label": label, "rev": rev}
272 yield {"label": label, "rev": rev}
273
273
274 yield {"label": "tip", "rev": ""}
274 yield {"label": "tip", "rev": ""}
275
275
276 def changelist(**map):
276 def changelist(**map):
277 parity = (start - end) & 1
277 parity = (start - end) & 1
278 cl = self.repo.changelog
278 cl = self.repo.changelog
279 l = [] # build a list in forward order for efficiency
279 l = [] # build a list in forward order for efficiency
280 for i in range(start, end):
280 for i in range(start, end):
281 n = cl.node(i)
281 n = cl.node(i)
282 changes = cl.read(n)
282 changes = cl.read(n)
283 hn = hex(n)
283 hn = hex(n)
284 t = float(changes[2].split(' ')[0])
284 t = float(changes[2].split(' ')[0])
285
285
286 l.insert(0, {"parity": parity,
286 l.insert(0, {"parity": parity,
287 "author": changes[1],
287 "author": changes[1],
288 "parent": self.parents("changelogparent",
288 "parent": self.parents("changelogparent",
289 cl.parents(n), cl.rev),
289 cl.parents(n), cl.rev),
290 "changelogtag": self.showtag("changelogtag",n),
290 "changelogtag": self.showtag("changelogtag",n),
291 "manifest": hex(changes[0]),
291 "manifest": hex(changes[0]),
292 "desc": changes[4],
292 "desc": changes[4],
293 "date": t,
293 "date": t,
294 "files": self.listfilediffs(changes[3], n),
294 "files": self.listfilediffs(changes[3], n),
295 "rev": i,
295 "rev": i,
296 "node": hn})
296 "node": hn})
297 parity = 1 - parity
297 parity = 1 - parity
298
298
299 for e in l:
299 for e in l:
300 yield e
300 yield e
301
301
302 cl = self.repo.changelog
302 cl = self.repo.changelog
303 mf = cl.read(cl.tip())[0]
303 mf = cl.read(cl.tip())[0]
304 count = cl.count()
304 count = cl.count()
305 start = max(0, pos - self.maxchanges + 1)
305 start = max(0, pos - self.maxchanges + 1)
306 end = min(count, start + self.maxchanges)
306 end = min(count, start + self.maxchanges)
307 pos = end - 1
307 pos = end - 1
308
308
309 yield self.t('changelog',
309 yield self.t('changelog',
310 changenav=changenav,
310 changenav=changenav,
311 manifest=hex(mf),
311 manifest=hex(mf),
312 rev=pos, changesets=count, entries=changelist)
312 rev=pos, changesets=count, entries=changelist)
313
313
314 def search(self, query):
314 def search(self, query):
315
315
316 def changelist(**map):
316 def changelist(**map):
317 cl = self.repo.changelog
317 cl = self.repo.changelog
318 count = 0
318 count = 0
319 qw = query.lower().split()
319 qw = query.lower().split()
320
320
321 def revgen():
321 def revgen():
322 for i in range(cl.count() - 1, 0, -100):
322 for i in range(cl.count() - 1, 0, -100):
323 l = []
323 l = []
324 for j in range(max(0, i - 100), i):
324 for j in range(max(0, i - 100), i):
325 n = cl.node(j)
325 n = cl.node(j)
326 changes = cl.read(n)
326 changes = cl.read(n)
327 l.append((n, j, changes))
327 l.append((n, j, changes))
328 l.reverse()
328 l.reverse()
329 for e in l:
329 for e in l:
330 yield e
330 yield e
331
331
332 for n, i, changes in revgen():
332 for n, i, changes in revgen():
333 miss = 0
333 miss = 0
334 for q in qw:
334 for q in qw:
335 if not (q in changes[1].lower() or
335 if not (q in changes[1].lower() or
336 q in changes[4].lower() or
336 q in changes[4].lower() or
337 q in " ".join(changes[3][:20]).lower()):
337 q in " ".join(changes[3][:20]).lower()):
338 miss = 1
338 miss = 1
339 break
339 break
340 if miss:
340 if miss:
341 continue
341 continue
342
342
343 count += 1
343 count += 1
344 hn = hex(n)
344 hn = hex(n)
345 t = float(changes[2].split(' ')[0])
345 t = float(changes[2].split(' ')[0])
346
346
347 yield self.t('searchentry',
347 yield self.t('searchentry',
348 parity=count & 1,
348 parity=count & 1,
349 author=changes[1],
349 author=changes[1],
350 parent=self.parents("changelogparent",
350 parent=self.parents("changelogparent",
351 cl.parents(n), cl.rev),
351 cl.parents(n), cl.rev),
352 changelogtag=self.showtag("changelogtag",n),
352 changelogtag=self.showtag("changelogtag",n),
353 manifest=hex(changes[0]),
353 manifest=hex(changes[0]),
354 desc=changes[4],
354 desc=changes[4],
355 date=t,
355 date=t,
356 files=self.listfilediffs(changes[3], n),
356 files=self.listfilediffs(changes[3], n),
357 rev=i,
357 rev=i,
358 node=hn)
358 node=hn)
359
359
360 if count >= self.maxchanges:
360 if count >= self.maxchanges:
361 break
361 break
362
362
363 cl = self.repo.changelog
363 cl = self.repo.changelog
364 mf = cl.read(cl.tip())[0]
364 mf = cl.read(cl.tip())[0]
365
365
366 yield self.t('search',
366 yield self.t('search',
367 query=query,
367 query=query,
368 manifest=hex(mf),
368 manifest=hex(mf),
369 entries=changelist)
369 entries=changelist)
370
370
371 def changeset(self, nodeid):
371 def changeset(self, nodeid):
372 n = bin(nodeid)
372 n = bin(nodeid)
373 cl = self.repo.changelog
373 cl = self.repo.changelog
374 changes = cl.read(n)
374 changes = cl.read(n)
375 p1 = cl.parents(n)[0]
375 p1 = cl.parents(n)[0]
376 t = float(changes[2].split(' ')[0])
376 t = float(changes[2].split(' ')[0])
377
377
378 files = []
378 files = []
379 mf = self.repo.manifest.read(changes[0])
379 mf = self.repo.manifest.read(changes[0])
380 for f in changes[3]:
380 for f in changes[3]:
381 files.append(self.t("filenodelink",
381 files.append(self.t("filenodelink",
382 filenode=hex(mf.get(f, nullid)), file=f))
382 filenode=hex(mf.get(f, nullid)), file=f))
383
383
384 def diff(**map):
384 def diff(**map):
385 yield self.diff(p1, n, None)
385 yield self.diff(p1, n, None)
386
386
387 yield self.t('changeset',
387 yield self.t('changeset',
388 diff=diff,
388 diff=diff,
389 rev=cl.rev(n),
389 rev=cl.rev(n),
390 node=nodeid,
390 node=nodeid,
391 parent=self.parents("changesetparent",
391 parent=self.parents("changesetparent",
392 cl.parents(n), cl.rev),
392 cl.parents(n), cl.rev),
393 changesettag=self.showtag("changesettag",n),
393 changesettag=self.showtag("changesettag",n),
394 manifest=hex(changes[0]),
394 manifest=hex(changes[0]),
395 author=changes[1],
395 author=changes[1],
396 desc=changes[4],
396 desc=changes[4],
397 date=t,
397 date=t,
398 files=files)
398 files=files)
399
399
400 def filelog(self, f, filenode):
400 def filelog(self, f, filenode):
401 cl = self.repo.changelog
401 cl = self.repo.changelog
402 fl = self.repo.file(f)
402 fl = self.repo.file(f)
403 count = fl.count()
403 count = fl.count()
404
404
405 def entries(**map):
405 def entries(**map):
406 l = []
406 l = []
407 parity = (count - 1) & 1
407 parity = (count - 1) & 1
408
408
409 for i in range(count):
409 for i in range(count):
410 n = fl.node(i)
410 n = fl.node(i)
411 lr = fl.linkrev(n)
411 lr = fl.linkrev(n)
412 cn = cl.node(lr)
412 cn = cl.node(lr)
413 cs = cl.read(cl.node(lr))
413 cs = cl.read(cl.node(lr))
414 t = float(cs[2].split(' ')[0])
414 t = float(cs[2].split(' ')[0])
415
415
416 l.insert(0, {"parity": parity,
416 l.insert(0, {"parity": parity,
417 "filenode": hex(n),
417 "filenode": hex(n),
418 "filerev": i,
418 "filerev": i,
419 "file": f,
419 "file": f,
420 "node": hex(cn),
420 "node": hex(cn),
421 "author": cs[1],
421 "author": cs[1],
422 "date": t,
422 "date": t,
423 "parent": self.parents("filelogparent",
423 "parent": self.parents("filelogparent",
424 fl.parents(n),
424 fl.parents(n),
425 fl.rev, file=f),
425 fl.rev, file=f),
426 "desc": cs[4]})
426 "desc": cs[4]})
427 parity = 1 - parity
427 parity = 1 - parity
428
428
429 for e in l:
429 for e in l:
430 yield e
430 yield e
431
431
432 yield self.t("filelog", file=f, filenode=filenode, entries=entries)
432 yield self.t("filelog", file=f, filenode=filenode, entries=entries)
433
433
434 def filerevision(self, f, node):
434 def filerevision(self, f, node):
435 fl = self.repo.file(f)
435 fl = self.repo.file(f)
436 n = bin(node)
436 n = bin(node)
437 text = fl.read(n)
437 text = fl.read(n)
438 changerev = fl.linkrev(n)
438 changerev = fl.linkrev(n)
439 cl = self.repo.changelog
439 cl = self.repo.changelog
440 cn = cl.node(changerev)
440 cn = cl.node(changerev)
441 cs = cl.read(cn)
441 cs = cl.read(cn)
442 t = float(cs[2].split(' ')[0])
442 t = float(cs[2].split(' ')[0])
443 mfn = cs[0]
443 mfn = cs[0]
444
444
445 def lines():
445 def lines():
446 for l, t in enumerate(text.splitlines(1)):
446 for l, t in enumerate(text.splitlines(1)):
447 yield {"line": t,
447 yield {"line": t,
448 "linenumber": "% 6d" % (l + 1),
448 "linenumber": "% 6d" % (l + 1),
449 "parity": l & 1}
449 "parity": l & 1}
450
450
451 yield self.t("filerevision",
451 yield self.t("filerevision",
452 file=f,
452 file=f,
453 filenode=node,
453 filenode=node,
454 path=up(f),
454 path=up(f),
455 text=lines(),
455 text=lines(),
456 rev=changerev,
456 rev=changerev,
457 node=hex(cn),
457 node=hex(cn),
458 manifest=hex(mfn),
458 manifest=hex(mfn),
459 author=cs[1],
459 author=cs[1],
460 date=t,
460 date=t,
461 parent=self.parents("filerevparent",
461 parent=self.parents("filerevparent",
462 fl.parents(n), fl.rev, file=f),
462 fl.parents(n), fl.rev, file=f),
463 permissions=self.repo.manifest.readflags(mfn)[f])
463 permissions=self.repo.manifest.readflags(mfn)[f])
464
464
465 def fileannotate(self, f, node):
465 def fileannotate(self, f, node):
466 bcache = {}
466 bcache = {}
467 ncache = {}
467 ncache = {}
468 fl = self.repo.file(f)
468 fl = self.repo.file(f)
469 n = bin(node)
469 n = bin(node)
470 changerev = fl.linkrev(n)
470 changerev = fl.linkrev(n)
471
471
472 cl = self.repo.changelog
472 cl = self.repo.changelog
473 cn = cl.node(changerev)
473 cn = cl.node(changerev)
474 cs = cl.read(cn)
474 cs = cl.read(cn)
475 t = float(cs[2].split(' ')[0])
475 t = float(cs[2].split(' ')[0])
476 mfn = cs[0]
476 mfn = cs[0]
477
477
478 def annotate(**map):
478 def annotate(**map):
479 parity = 1
479 parity = 1
480 last = None
480 last = None
481 for r, l in fl.annotate(n):
481 for r, l in fl.annotate(n):
482 try:
482 try:
483 cnode = ncache[r]
483 cnode = ncache[r]
484 except KeyError:
484 except KeyError:
485 cnode = ncache[r] = self.repo.changelog.node(r)
485 cnode = ncache[r] = self.repo.changelog.node(r)
486
486
487 try:
487 try:
488 name = bcache[r]
488 name = bcache[r]
489 except KeyError:
489 except KeyError:
490 cl = self.repo.changelog.read(cnode)
490 cl = self.repo.changelog.read(cnode)
491 name = cl[1]
491 name = cl[1]
492 f = name.find('@')
492 f = name.find('@')
493 if f >= 0:
493 if f >= 0:
494 name = name[:f]
494 name = name[:f]
495 f = name.find('<')
495 f = name.find('<')
496 if f >= 0:
496 if f >= 0:
497 name = name[f+1:]
497 name = name[f+1:]
498 bcache[r] = name
498 bcache[r] = name
499
499
500 if last != cnode:
500 if last != cnode:
501 parity = 1 - parity
501 parity = 1 - parity
502 last = cnode
502 last = cnode
503
503
504 yield {"parity": parity,
504 yield {"parity": parity,
505 "node": hex(cnode),
505 "node": hex(cnode),
506 "rev": r,
506 "rev": r,
507 "author": name,
507 "author": name,
508 "file": f,
508 "file": f,
509 "line": l}
509 "line": l}
510
510
511 yield self.t("fileannotate",
511 yield self.t("fileannotate",
512 file=f,
512 file=f,
513 filenode=node,
513 filenode=node,
514 annotate=annotate,
514 annotate=annotate,
515 path=up(f),
515 path=up(f),
516 rev=changerev,
516 rev=changerev,
517 node=hex(cn),
517 node=hex(cn),
518 manifest=hex(mfn),
518 manifest=hex(mfn),
519 author=cs[1],
519 author=cs[1],
520 date=t,
520 date=t,
521 parent=self.parents("fileannotateparent",
521 parent=self.parents("fileannotateparent",
522 fl.parents(n), fl.rev, file=f),
522 fl.parents(n), fl.rev, file=f),
523 permissions=self.repo.manifest.readflags(mfn)[f])
523 permissions=self.repo.manifest.readflags(mfn)[f])
524
524
525 def manifest(self, mnode, path):
525 def manifest(self, mnode, path):
526 mf = self.repo.manifest.read(bin(mnode))
526 mf = self.repo.manifest.read(bin(mnode))
527 rev = self.repo.manifest.rev(bin(mnode))
527 rev = self.repo.manifest.rev(bin(mnode))
528 node = self.repo.changelog.node(rev)
528 node = self.repo.changelog.node(rev)
529 mff=self.repo.manifest.readflags(bin(mnode))
529 mff=self.repo.manifest.readflags(bin(mnode))
530
530
531 files = {}
531 files = {}
532
532
533 p = path[1:]
533 p = path[1:]
534 l = len(p)
534 l = len(p)
535
535
536 for f,n in mf.items():
536 for f,n in mf.items():
537 if f[:l] != p:
537 if f[:l] != p:
538 continue
538 continue
539 remain = f[l:]
539 remain = f[l:]
540 if "/" in remain:
540 if "/" in remain:
541 short = remain[:remain.find("/") + 1] # bleah
541 short = remain[:remain.find("/") + 1] # bleah
542 files[short] = (f, None)
542 files[short] = (f, None)
543 else:
543 else:
544 short = os.path.basename(remain)
544 short = os.path.basename(remain)
545 files[short] = (f, n)
545 files[short] = (f, n)
546
546
547 def filelist(**map):
547 def filelist(**map):
548 parity = 0
548 parity = 0
549 fl = files.keys()
549 fl = files.keys()
550 fl.sort()
550 fl.sort()
551 for f in fl:
551 for f in fl:
552 full, fnode = files[f]
552 full, fnode = files[f]
553 if not fnode:
553 if not fnode:
554 continue
554 continue
555
555
556 yield {"file": full,
556 yield {"file": full,
557 "manifest": mnode,
557 "manifest": mnode,
558 "filenode": hex(fnode),
558 "filenode": hex(fnode),
559 "parity": parity,
559 "parity": parity,
560 "basename": f,
560 "basename": f,
561 "permissions": mff[full]}
561 "permissions": mff[full]}
562 parity = 1 - parity
562 parity = 1 - parity
563
563
564 def dirlist(**map):
564 def dirlist(**map):
565 parity = 0
565 parity = 0
566 fl = files.keys()
566 fl = files.keys()
567 fl.sort()
567 fl.sort()
568 for f in fl:
568 for f in fl:
569 full, fnode = files[f]
569 full, fnode = files[f]
570 if fnode:
570 if fnode:
571 continue
571 continue
572
572
573 yield {"parity": parity,
573 yield {"parity": parity,
574 "path": os.path.join(path, f),
574 "path": os.path.join(path, f),
575 "manifest": mnode,
575 "manifest": mnode,
576 "basename": f[:-1]}
576 "basename": f[:-1]}
577 parity = 1 - parity
577 parity = 1 - parity
578
578
579 yield self.t("manifest",
579 yield self.t("manifest",
580 manifest=mnode,
580 manifest=mnode,
581 rev=rev,
581 rev=rev,
582 node=hex(node),
582 node=hex(node),
583 path=path,
583 path=path,
584 up=up(path),
584 up=up(path),
585 fentries=filelist,
585 fentries=filelist,
586 dentries=dirlist)
586 dentries=dirlist)
587
587
588 def tags(self):
588 def tags(self):
589 cl = self.repo.changelog
589 cl = self.repo.changelog
590 mf = cl.read(cl.tip())[0]
590 mf = cl.read(cl.tip())[0]
591
591
592 i = self.repo.tagslist()
592 i = self.repo.tagslist()
593 i.reverse()
593 i.reverse()
594
594
595 def entries(**map):
595 def entries(**map):
596 parity = 0
596 parity = 0
597 for k,n in i:
597 for k,n in i:
598 yield {"parity": parity,
598 yield {"parity": parity,
599 "tag": k,
599 "tag": k,
600 "node": hex(n)}
600 "node": hex(n)}
601 parity = 1 - parity
601 parity = 1 - parity
602
602
603 yield self.t("tags",
603 yield self.t("tags",
604 manifest=hex(mf),
604 manifest=hex(mf),
605 entries=entries)
605 entries=entries)
606
606
607 def filediff(self, file, changeset):
607 def filediff(self, file, changeset):
608 n = bin(changeset)
608 n = bin(changeset)
609 cl = self.repo.changelog
609 cl = self.repo.changelog
610 p1 = cl.parents(n)[0]
610 p1 = cl.parents(n)[0]
611 cs = cl.read(n)
611 cs = cl.read(n)
612 mf = self.repo.manifest.read(cs[0])
612 mf = self.repo.manifest.read(cs[0])
613
613
614 def diff(**map):
614 def diff(**map):
615 yield self.diff(p1, n, file)
615 yield self.diff(p1, n, file)
616
616
617 yield self.t("filediff",
617 yield self.t("filediff",
618 file=file,
618 file=file,
619 filenode=hex(mf.get(file, nullid)),
619 filenode=hex(mf.get(file, nullid)),
620 node=changeset,
620 node=changeset,
621 rev=self.repo.changelog.rev(n),
621 rev=self.repo.changelog.rev(n),
622 parent=self.parents("filediffparent",
622 parent=self.parents("filediffparent",
623 cl.parents(n), cl.rev),
623 cl.parents(n), cl.rev),
624 diff=diff)
624 diff=diff)
625
625
626 # add tags to things
626 # add tags to things
627 # tags -> list of changesets corresponding to tags
627 # tags -> list of changesets corresponding to tags
628 # find tag, changeset, file
628 # find tag, changeset, file
629
629
630 def run(self):
630 def run(self):
631 def header(**map):
631 def header(**map):
632 yield self.t("header", **map)
632 yield self.t("header", **map)
633
633
634 def footer(**map):
634 def footer(**map):
635 yield self.t("footer", **map)
635 yield self.t("footer", **map)
636
636
637 self.refresh()
637 self.refresh()
638 args = cgi.parse()
638 args = cgi.parse()
639
639
640 t = self.repo.ui.config("web", "templates", templatepath())
640 t = self.repo.ui.config("web", "templates", templatepath())
641 m = os.path.join(t, "map")
641 m = os.path.join(t, "map")
642 style = self.repo.ui.config("web", "style", "")
642 style = self.repo.ui.config("web", "style", "")
643 if args.has_key('style'):
643 if args.has_key('style'):
644 style = args['style'][0]
644 style = args['style'][0]
645 if style:
645 if style:
646 b = os.path.basename("map-" + style)
646 b = os.path.basename("map-" + style)
647 p = os.path.join(t, b)
647 p = os.path.join(t, b)
648 if os.path.isfile(p):
648 if os.path.isfile(p):
649 m = p
649 m = p
650
650
651 port = os.environ["SERVER_PORT"]
651 port = os.environ["SERVER_PORT"]
652 port = port != "80" and (":" + port) or ""
652 port = port != "80" and (":" + port) or ""
653 uri = os.environ["REQUEST_URI"]
653 uri = os.environ["REQUEST_URI"]
654 if "?" in uri:
654 if "?" in uri:
655 uri = uri.split("?")[0]
655 uri = uri.split("?")[0]
656 url = "http://%s%s%s" % (os.environ["SERVER_NAME"], port, uri)
656 url = "http://%s%s%s" % (os.environ["SERVER_NAME"], port, uri)
657
657
658 self.t = templater(m, common_filters,
658 self.t = templater(m, common_filters,
659 {"url": url,
659 {"url": url,
660 "repo": self.reponame,
660 "repo": self.reponame,
661 "header": header,
661 "header": header,
662 "footer": footer,
662 "footer": footer,
663 })
663 })
664
664
665 if not args.has_key('cmd'):
665 if not args.has_key('cmd'):
666 args['cmd'] = [self.t.cache['default'],]
666 args['cmd'] = [self.t.cache['default'],]
667
667
668 if args['cmd'][0] == 'changelog':
668 if args['cmd'][0] == 'changelog':
669 c = self.repo.changelog.count() - 1
669 c = self.repo.changelog.count() - 1
670 hi = c
670 hi = c
671 if args.has_key('rev'):
671 if args.has_key('rev'):
672 hi = args['rev'][0]
672 hi = args['rev'][0]
673 try:
673 try:
674 hi = self.repo.changelog.rev(self.repo.lookup(hi))
674 hi = self.repo.changelog.rev(self.repo.lookup(hi))
675 except RepoError:
675 except RepoError:
676 write(self.search(hi))
676 write(self.search(hi))
677 return
677 return
678
678
679 write(self.changelog(hi))
679 write(self.changelog(hi))
680
680
681 elif args['cmd'][0] == 'changeset':
681 elif args['cmd'][0] == 'changeset':
682 write(self.changeset(args['node'][0]))
682 write(self.changeset(args['node'][0]))
683
683
684 elif args['cmd'][0] == 'manifest':
684 elif args['cmd'][0] == 'manifest':
685 write(self.manifest(args['manifest'][0], args['path'][0]))
685 write(self.manifest(args['manifest'][0], args['path'][0]))
686
686
687 elif args['cmd'][0] == 'tags':
687 elif args['cmd'][0] == 'tags':
688 write(self.tags())
688 write(self.tags())
689
689
690 elif args['cmd'][0] == 'filediff':
690 elif args['cmd'][0] == 'filediff':
691 write(self.filediff(args['file'][0], args['node'][0]))
691 write(self.filediff(args['file'][0], args['node'][0]))
692
692
693 elif args['cmd'][0] == 'file':
693 elif args['cmd'][0] == 'file':
694 write(self.filerevision(args['file'][0], args['filenode'][0]))
694 write(self.filerevision(args['file'][0], args['filenode'][0]))
695
695
696 elif args['cmd'][0] == 'annotate':
696 elif args['cmd'][0] == 'annotate':
697 write(self.fileannotate(args['file'][0], args['filenode'][0]))
697 write(self.fileannotate(args['file'][0], args['filenode'][0]))
698
698
699 elif args['cmd'][0] == 'filelog':
699 elif args['cmd'][0] == 'filelog':
700 write(self.filelog(args['file'][0], args['filenode'][0]))
700 write(self.filelog(args['file'][0], args['filenode'][0]))
701
701
702 elif args['cmd'][0] == 'heads':
702 elif args['cmd'][0] == 'heads':
703 httphdr("application/mercurial-0.1")
703 httphdr("application/mercurial-0.1")
704 h = self.repo.heads()
704 h = self.repo.heads()
705 sys.stdout.write(" ".join(map(hex, h)) + "\n")
705 sys.stdout.write(" ".join(map(hex, h)) + "\n")
706
706
707 elif args['cmd'][0] == 'branches':
707 elif args['cmd'][0] == 'branches':
708 httphdr("application/mercurial-0.1")
708 httphdr("application/mercurial-0.1")
709 nodes = []
709 nodes = []
710 if args.has_key('nodes'):
710 if args.has_key('nodes'):
711 nodes = map(bin, args['nodes'][0].split(" "))
711 nodes = map(bin, args['nodes'][0].split(" "))
712 for b in self.repo.branches(nodes):
712 for b in self.repo.branches(nodes):
713 sys.stdout.write(" ".join(map(hex, b)) + "\n")
713 sys.stdout.write(" ".join(map(hex, b)) + "\n")
714
714
715 elif args['cmd'][0] == 'between':
715 elif args['cmd'][0] == 'between':
716 httphdr("application/mercurial-0.1")
716 httphdr("application/mercurial-0.1")
717 nodes = []
717 nodes = []
718 if args.has_key('pairs'):
718 if args.has_key('pairs'):
719 pairs = [map(bin, p.split("-"))
719 pairs = [map(bin, p.split("-"))
720 for p in args['pairs'][0].split(" ")]
720 for p in args['pairs'][0].split(" ")]
721 for b in self.repo.between(pairs):
721 for b in self.repo.between(pairs):
722 sys.stdout.write(" ".join(map(hex, b)) + "\n")
722 sys.stdout.write(" ".join(map(hex, b)) + "\n")
723
723
724 elif args['cmd'][0] == 'changegroup':
724 elif args['cmd'][0] == 'changegroup':
725 httphdr("application/mercurial-0.1")
725 httphdr("application/mercurial-0.1")
726 nodes = []
726 nodes = []
727 if not self.allowpull:
727 if not self.allowpull:
728 return
728 return
729
729
730 if args.has_key('roots'):
730 if args.has_key('roots'):
731 nodes = map(bin, args['roots'][0].split(" "))
731 nodes = map(bin, args['roots'][0].split(" "))
732
732
733 z = zlib.compressobj()
733 z = zlib.compressobj()
734 f = self.repo.changegroup(nodes)
734 f = self.repo.changegroup(nodes)
735 while 1:
735 while 1:
736 chunk = f.read(4096)
736 chunk = f.read(4096)
737 if not chunk:
737 if not chunk:
738 break
738 break
739 sys.stdout.write(z.compress(chunk))
739 sys.stdout.write(z.compress(chunk))
740
740
741 sys.stdout.write(z.flush())
741 sys.stdout.write(z.flush())
742
742
743 else:
743 else:
744 write(self.t("error"))
744 write(self.t("error"))
745
745
746 def create_server(repo):
746 def create_server(repo):
747
747
748 def openlog(opt, default):
748 def openlog(opt, default):
749 if opt and opt != '-':
749 if opt and opt != '-':
750 return open(opt, 'w')
750 return open(opt, 'w')
751 return default
751 return default
752
752
753 address = repo.ui.config("web", "address", "")
753 address = repo.ui.config("web", "address", "")
754 port = int(repo.ui.config("web", "port", 8000))
754 port = int(repo.ui.config("web", "port", 8000))
755 use_ipv6 = repo.ui.configbool("web", "ipv6")
755 use_ipv6 = repo.ui.configbool("web", "ipv6")
756 accesslog = openlog(repo.ui.config("web", "accesslog", "-"), sys.stdout)
756 accesslog = openlog(repo.ui.config("web", "accesslog", "-"), sys.stdout)
757 errorlog = openlog(repo.ui.config("web", "errorlog", "-"), sys.stderr)
757 errorlog = openlog(repo.ui.config("web", "errorlog", "-"), sys.stderr)
758
758
759 import BaseHTTPServer
759 import BaseHTTPServer
760
760
761 class IPv6HTTPServer(BaseHTTPServer.HTTPServer):
761 class IPv6HTTPServer(BaseHTTPServer.HTTPServer):
762 address_family = getattr(socket, 'AF_INET6', None)
762 address_family = getattr(socket, 'AF_INET6', None)
763
763
764 def __init__(self, *args, **kwargs):
764 def __init__(self, *args, **kwargs):
765 if self.address_family is None:
765 if self.address_family is None:
766 raise RepoError('IPv6 not available on this system')
766 raise RepoError('IPv6 not available on this system')
767 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
767 BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
768
768
769 class hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
769 class hgwebhandler(BaseHTTPServer.BaseHTTPRequestHandler):
770 def log_error(self, format, *args):
770 def log_error(self, format, *args):
771 errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
771 errorlog.write("%s - - [%s] %s\n" % (self.address_string(),
772 self.log_date_time_string(),
772 self.log_date_time_string(),
773 format % args))
773 format % args))
774
774
775 def log_message(self, format, *args):
775 def log_message(self, format, *args):
776 accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
776 accesslog.write("%s - - [%s] %s\n" % (self.address_string(),
777 self.log_date_time_string(),
777 self.log_date_time_string(),
778 format % args))
778 format % args))
779
779
780 def do_POST(self):
780 def do_POST(self):
781 try:
781 try:
782 self.do_hgweb()
782 self.do_hgweb()
783 except socket.error, inst:
783 except socket.error, inst:
784 if inst.args[0] != 32:
784 if inst.args[0] != 32:
785 raise
785 raise
786
786
787 def do_GET(self):
787 def do_GET(self):
788 self.do_POST()
788 self.do_POST()
789
789
790 def do_hgweb(self):
790 def do_hgweb(self):
791 query = ""
791 query = ""
792 p = self.path.find("?")
792 p = self.path.find("?")
793 if p:
793 if p:
794 query = self.path[p + 1:]
794 query = self.path[p + 1:]
795 query = query.replace('+', ' ')
795 query = query.replace('+', ' ')
796
796
797 env = {}
797 env = {}
798 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
798 env['GATEWAY_INTERFACE'] = 'CGI/1.1'
799 env['REQUEST_METHOD'] = self.command
799 env['REQUEST_METHOD'] = self.command
800 env['SERVER_NAME'] = self.server.server_name
800 env['SERVER_NAME'] = self.server.server_name
801 env['SERVER_PORT'] = str(self.server.server_port)
801 env['SERVER_PORT'] = str(self.server.server_port)
802 env['REQUEST_URI'] = "/"
802 env['REQUEST_URI'] = "/"
803 if query:
803 if query:
804 env['QUERY_STRING'] = query
804 env['QUERY_STRING'] = query
805 host = self.address_string()
805 host = self.address_string()
806 if host != self.client_address[0]:
806 if host != self.client_address[0]:
807 env['REMOTE_HOST'] = host
807 env['REMOTE_HOST'] = host
808 env['REMOTE_ADDR'] = self.client_address[0]
808 env['REMOTE_ADDR'] = self.client_address[0]
809
809
810 if self.headers.typeheader is None:
810 if self.headers.typeheader is None:
811 env['CONTENT_TYPE'] = self.headers.type
811 env['CONTENT_TYPE'] = self.headers.type
812 else:
812 else:
813 env['CONTENT_TYPE'] = self.headers.typeheader
813 env['CONTENT_TYPE'] = self.headers.typeheader
814 length = self.headers.getheader('content-length')
814 length = self.headers.getheader('content-length')
815 if length:
815 if length:
816 env['CONTENT_LENGTH'] = length
816 env['CONTENT_LENGTH'] = length
817 accept = []
817 accept = []
818 for line in self.headers.getallmatchingheaders('accept'):
818 for line in self.headers.getallmatchingheaders('accept'):
819 if line[:1] in "\t\n\r ":
819 if line[:1] in "\t\n\r ":
820 accept.append(line.strip())
820 accept.append(line.strip())
821 else:
821 else:
822 accept = accept + line[7:].split(',')
822 accept = accept + line[7:].split(',')
823 env['HTTP_ACCEPT'] = ','.join(accept)
823 env['HTTP_ACCEPT'] = ','.join(accept)
824
824
825 os.environ.update(env)
825 os.environ.update(env)
826
826
827 save = sys.argv, sys.stdin, sys.stdout, sys.stderr
827 save = sys.argv, sys.stdin, sys.stdout, sys.stderr
828 try:
828 try:
829 sys.stdin = self.rfile
829 sys.stdin = self.rfile
830 sys.stdout = self.wfile
830 sys.stdout = self.wfile
831 sys.argv = ["hgweb.py"]
831 sys.argv = ["hgweb.py"]
832 if '=' not in query:
832 if '=' not in query:
833 sys.argv.append(query)
833 sys.argv.append(query)
834 self.send_response(200, "Script output follows")
834 self.send_response(200, "Script output follows")
835 hg.run()
835 hg.run()
836 finally:
836 finally:
837 sys.argv, sys.stdin, sys.stdout, sys.stderr = save
837 sys.argv, sys.stdin, sys.stdout, sys.stderr = save
838
838
839 hg = hgweb(repo)
839 hg = hgweb(repo)
840 if use_ipv6:
840 if use_ipv6:
841 return IPv6HTTPServer((address, port), hgwebhandler)
841 return IPv6HTTPServer((address, port), hgwebhandler)
842 else:
842 else:
843 return BaseHTTPServer.HTTPServer((address, port), hgwebhandler)
843 return BaseHTTPServer.HTTPServer((address, port), hgwebhandler)
844
844
def server(path, name, templates, address, port, use_ipv6=False,
           accesslog=sys.stdout, errorlog=sys.stderr):
    """Create an hgweb HTTP server and serve requests until killed.

    NOTE(review): create_server() takes a single repository argument,
    but this call still passes the old (path, name, templates, ...)
    argument list -- this looks stale; confirm against callers.
    """
    httpd = create_server(path, name, templates, address, port, use_ipv6,
                          accesslog, errorlog)
    httpd.serve_forever()
850
850
851 # This is a stopgap
851 # This is a stopgap
class hgwebdir:
    """Serve an index of several repositories.

    The repositories are listed in the [paths] section of the config
    file handed to __init__; a request with a non-empty PATH_INFO is
    dispatched to the matching hgweb instance, anything else renders
    the index page.
    """
    def __init__(self, config):
        self.cp = ConfigParser.SafeConfigParser()
        self.cp.read(config)

    def run(self):
        # PATH_INFO selects a single repository; absent means "index".
        # (was: bare except swallowing every error, not just KeyError)
        virtual = os.environ.get("PATH_INFO", "")

        if virtual[1:]:
            real = self.cp.get("paths", virtual[1:])
            h = hgweb(real)
            h.run()
            return

        def header(**map):
            yield tmpl("header", **map)

        def footer(**map):
            yield tmpl("footer", **map)

        templates = templatepath()
        m = os.path.join(templates, "map")
        tmpl = templater(m, common_filters,
                         {"header": header, "footer": footer})

        def entries(**map):
            parity = 0
            l = self.cp.items("paths")
            l.sort()
            for v, r in l:
                cp2 = ConfigParser.SafeConfigParser()
                cp2.read(os.path.join(r, ".hg", "hgrc"))

                def get(sec, val, default):
                    # a missing section or option falls back to the
                    # default (was: bare except, which also hid real
                    # configuration errors)
                    try:
                        return cp2.get(sec, val)
                    except (ConfigParser.NoSectionError,
                            ConfigParser.NoOptionError):
                        return default

                url = os.environ["REQUEST_URI"] + "/" + v
                url = url.replace("//", "/")

                yield dict(author=get("web", "author", "unknown"),
                           name=get("web", "name", v),
                           url=url,
                           parity=parity,
                           shortdesc=get("web", "description", "unknown"),
                           lastupdate=os.stat(os.path.join(r, ".hg",
                                              "00changelog.d")).st_mtime)

                # alternate row shading in the rendered table
                parity = 1 - parity

        write(tmpl("index", entries=entries))
@@ -1,551 +1,553 b''
1 # revlog.py - storage back-end for mercurial
1 # revlog.py - storage back-end for mercurial
2 #
2 #
3 # This provides efficient delta storage with O(1) retrieve and append
3 # This provides efficient delta storage with O(1) retrieve and append
4 # and O(changes) merge between branches
4 # and O(changes) merge between branches
5 #
5 #
6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
6 # Copyright 2005 Matt Mackall <mpm@selenic.com>
7 #
7 #
8 # This software may be used and distributed according to the terms
8 # This software may be used and distributed according to the terms
9 # of the GNU General Public License, incorporated herein by reference.
9 # of the GNU General Public License, incorporated herein by reference.
10
10
11 import zlib, struct, sha, binascii, heapq
11 import zlib, struct, sha, binascii, heapq
12 from mercurial import mdiff
12 from mercurial import mdiff
13
13
def hex(node):
    """Return the hexadecimal form of a binary node id."""
    return binascii.hexlify(node)

def bin(node):
    """Inverse of hex(): decode a hex string back to binary."""
    return binascii.unhexlify(node)

def short(node):
    """Return an abbreviated 12-hex-digit form of a binary node id."""
    return hex(node[:6])
17
17
def compress(text):
    """Compress a revlog chunk for storage.

    Returns the zlib-compressed form when that actually saves space,
    otherwise the text marked as stored literally: chunks starting with
    NUL are self-marking and kept unchanged, everything else gets a 'u'
    prefix.
    """
    if not text:
        return text

    def stored():
        # literal storage: NUL-led data needs no marker, others get 'u'
        if text[0] == '\0':
            return text
        return 'u' + text

    # tiny chunks never win once zlib's header overhead is paid
    if len(text) < 44:
        return stored()
    packed = zlib.compress(text)
    if len(packed) > len(text):
        return stored()
    return packed
28
28
def decompress(bin):
    """Reverse compress() by dispatching on the first (marker) byte.

    NUL -> chunk stored literally (returned unchanged), 'x' -> zlib
    stream, 'u' -> literal with the marker stripped.  Any other marker
    means corruption and raises RevlogError.
    """
    if not bin:
        return bin
    t = bin[0]
    if t == '\0':
        return bin
    if t == 'x':
        return zlib.decompress(bin)
    if t == 'u':
        return bin[1:]
    # raise a real Exception subclass rather than a string: string
    # exceptions are deprecated and callers cannot catch them reliably
    raise RevlogError("unknown compression type %s" % t)
36
36
def hash(text, p1, p2):
    """Return the SHA-1 digest identifying *text* with parents p1/p2.

    The parents are ordered canonically first, so the node is the same
    regardless of which parent is listed first.
    """
    a, b = p1, p2
    if b < a:
        a, b = b, a
    s = sha.new(a)
    s.update(b)
    s.update(text)
    return s.digest()
44
44
# the revlog's "no parent" sentinel: 20 zero bytes
nullid = "\0" * 20
# index entry layout: offset, size, base, linkrev (4 big-endian longs)
# followed by p1, p2 and node (three 20-byte binary ids)
indexformat = ">4l20s20s20s"
47
47
class lazyparser:
    """Parse a revlog index incrementally.

    Large indexes are unpacked in 1000-entry blocks on demand instead
    of all at once; parsed entries are shared with the owning revlog by
    assigning its index/nodemap attributes on first load.
    """
    def __init__(self, data, revlog):
        self.data = data
        self.s = struct.calcsize(indexformat)
        self.l = len(data)/self.s
        self.index = [None] * self.l
        self.map = {nullid: -1}
        self.all = 0
        self.revlog = revlog

    def load(self, pos=None):
        """Parse the 1000-entry block containing *pos*, or the whole
        index when pos is None."""
        if self.all:
            return
        if pos is None:
            # full parse requested
            self.all = 1
            start, end = 0, self.l
        else:
            start = (pos / 1000) * 1000
            end = min(self.l, start + 1000)
        # hand the (possibly partially filled) structures to the revlog
        self.revlog.index = self.index
        self.revlog.nodemap = self.map

        entsize = self.s
        for i in xrange(start, end):
            raw = self.data[i * entsize: (i + 1) * entsize]
            entry = struct.unpack(indexformat, raw)
            self.index[i] = entry
            self.map[entry[6]] = i
77
77
class lazyindex:
    """List-like view over a lazyparser's index that faults entries in
    on first access."""
    def __init__(self, parser):
        self.p = parser

    def __len__(self):
        return len(self.p.index)

    def load(self, pos):
        # parse the block holding pos, then return the now-filled slot
        self.p.load(pos)
        return self.p.index[pos]

    def __getitem__(self, pos):
        # unparsed slots hold None, which triggers a lazy load
        return self.p.index[pos] or self.load(pos)

    def append(self, e):
        self.p.index.append(e)
90
90
class lazymap:
    """Dict-like node->rev mapping over a lazyparser, searching the raw
    index data for nodes that have not been parsed yet."""
    def __init__(self, parser):
        self.p = parser

    def load(self, key):
        """Locate *key* in the raw index data and parse its block."""
        if self.p.all:
            return
        n = self.p.data.find(key)
        if n < 0:
            raise KeyError("node " + hex(key))
        # byte offset -> entry number
        self.p.load(n / self.p.s)

    def __contains__(self, key):
        # containment needs the complete map
        self.p.load()
        return key in self.p.map

    def __iter__(self):
        yield nullid
        for i in xrange(self.p.l):
            try:
                yield self.p.index[i][6]
            except:
                # slot still holds None: fault the block in and retry
                self.p.load(i)
                yield self.p.index[i][6]

    def __getitem__(self, key):
        try:
            return self.p.map[key]
        except KeyError:
            try:
                self.load(key)
                return self.p.map[key]
            except KeyError:
                raise KeyError("node " + hex(key))

    def __setitem__(self, key, val):
        self.p.map[key] = val
122
122
class RevlogError(Exception):
    """Raised for revlog format/consistency problems (e.g. an unknown
    chunk compression type)."""
    pass
124
123 class revlog:
125 class revlog:
124 def __init__(self, opener, indexfile, datafile):
126 def __init__(self, opener, indexfile, datafile):
125 self.indexfile = indexfile
127 self.indexfile = indexfile
126 self.datafile = datafile
128 self.datafile = datafile
127 self.opener = opener
129 self.opener = opener
128 self.cache = None
130 self.cache = None
129
131
130 try:
132 try:
131 i = self.opener(self.indexfile).read()
133 i = self.opener(self.indexfile).read()
132 except IOError:
134 except IOError:
133 i = ""
135 i = ""
134
136
135 if len(i) > 10000:
137 if len(i) > 10000:
136 # big index, let's parse it on demand
138 # big index, let's parse it on demand
137 parser = lazyparser(i, self)
139 parser = lazyparser(i, self)
138 self.index = lazyindex(parser)
140 self.index = lazyindex(parser)
139 self.nodemap = lazymap(parser)
141 self.nodemap = lazymap(parser)
140 else:
142 else:
141 s = struct.calcsize(indexformat)
143 s = struct.calcsize(indexformat)
142 l = len(i) / s
144 l = len(i) / s
143 self.index = [None] * l
145 self.index = [None] * l
144 m = [None] * l
146 m = [None] * l
145
147
146 n = 0
148 n = 0
147 for f in xrange(0, len(i), s):
149 for f in xrange(0, len(i), s):
148 # offset, size, base, linkrev, p1, p2, nodeid
150 # offset, size, base, linkrev, p1, p2, nodeid
149 e = struct.unpack(indexformat, i[f:f + s])
151 e = struct.unpack(indexformat, i[f:f + s])
150 m[n] = (e[6], n)
152 m[n] = (e[6], n)
151 self.index[n] = e
153 self.index[n] = e
152 n += 1
154 n += 1
153
155
154 self.nodemap = dict(m)
156 self.nodemap = dict(m)
155 self.nodemap[nullid] = -1
157 self.nodemap[nullid] = -1
156
158
157 def tip(self): return self.node(len(self.index) - 1)
159 def tip(self): return self.node(len(self.index) - 1)
158 def count(self): return len(self.index)
160 def count(self): return len(self.index)
159 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
161 def node(self, rev): return (rev < 0) and nullid or self.index[rev][6]
160 def rev(self, node): return self.nodemap[node]
162 def rev(self, node): return self.nodemap[node]
161 def linkrev(self, node): return self.index[self.nodemap[node]][3]
163 def linkrev(self, node): return self.index[self.nodemap[node]][3]
162 def parents(self, node):
164 def parents(self, node):
163 if node == nullid: return (nullid, nullid)
165 if node == nullid: return (nullid, nullid)
164 return self.index[self.nodemap[node]][4:6]
166 return self.index[self.nodemap[node]][4:6]
165
167
166 def start(self, rev): return self.index[rev][0]
168 def start(self, rev): return self.index[rev][0]
167 def length(self, rev): return self.index[rev][1]
169 def length(self, rev): return self.index[rev][1]
168 def end(self, rev): return self.start(rev) + self.length(rev)
170 def end(self, rev): return self.start(rev) + self.length(rev)
169 def base(self, rev): return self.index[rev][2]
171 def base(self, rev): return self.index[rev][2]
170
172
171 def heads(self, stop=None):
173 def heads(self, stop=None):
172 p = {}
174 p = {}
173 h = []
175 h = []
174 stoprev = 0
176 stoprev = 0
175 if stop and stop in self.nodemap:
177 if stop and stop in self.nodemap:
176 stoprev = self.rev(stop)
178 stoprev = self.rev(stop)
177
179
178 for r in range(self.count() - 1, -1, -1):
180 for r in range(self.count() - 1, -1, -1):
179 n = self.node(r)
181 n = self.node(r)
180 if n not in p:
182 if n not in p:
181 h.append(n)
183 h.append(n)
182 if n == stop:
184 if n == stop:
183 break
185 break
184 if r < stoprev:
186 if r < stoprev:
185 break
187 break
186 for pn in self.parents(n):
188 for pn in self.parents(n):
187 p[pn] = 1
189 p[pn] = 1
188 return h
190 return h
189
191
190 def children(self, node):
192 def children(self, node):
191 c = []
193 c = []
192 p = self.rev(node)
194 p = self.rev(node)
193 for r in range(p + 1, self.count()):
195 for r in range(p + 1, self.count()):
194 n = self.node(r)
196 n = self.node(r)
195 for pn in self.parents(n):
197 for pn in self.parents(n):
196 if pn == node:
198 if pn == node:
197 c.append(n)
199 c.append(n)
198 continue
200 continue
199 elif pn == nullid:
201 elif pn == nullid:
200 continue
202 continue
201 return c
203 return c
202
204
203 def lookup(self, id):
205 def lookup(self, id):
204 try:
206 try:
205 rev = int(id)
207 rev = int(id)
206 if str(rev) != id: raise ValueError
208 if str(rev) != id: raise ValueError
207 if rev < 0: rev = self.count() + rev
209 if rev < 0: rev = self.count() + rev
208 if rev < 0 or rev >= self.count(): raise ValueError
210 if rev < 0 or rev >= self.count(): raise ValueError
209 return self.node(rev)
211 return self.node(rev)
210 except (ValueError, OverflowError):
212 except (ValueError, OverflowError):
211 c = []
213 c = []
212 for n in self.nodemap:
214 for n in self.nodemap:
213 if hex(n).startswith(id):
215 if hex(n).startswith(id):
214 c.append(n)
216 c.append(n)
215 if len(c) > 1: raise KeyError("Ambiguous identifier")
217 if len(c) > 1: raise KeyError("Ambiguous identifier")
216 if len(c) < 1: raise KeyError("No match found")
218 if len(c) < 1: raise KeyError("No match found")
217 return c[0]
219 return c[0]
218
220
219 return None
221 return None
220
222
221 def diff(self, a, b):
223 def diff(self, a, b):
222 return mdiff.textdiff(a, b)
224 return mdiff.textdiff(a, b)
223
225
224 def patches(self, t, pl):
226 def patches(self, t, pl):
225 return mdiff.patches(t, pl)
227 return mdiff.patches(t, pl)
226
228
227 def delta(self, node):
229 def delta(self, node):
228 r = self.rev(node)
230 r = self.rev(node)
229 b = self.base(r)
231 b = self.base(r)
230 if r == b:
232 if r == b:
231 return self.diff(self.revision(self.node(r - 1)),
233 return self.diff(self.revision(self.node(r - 1)),
232 self.revision(node))
234 self.revision(node))
233 else:
235 else:
234 f = self.opener(self.datafile)
236 f = self.opener(self.datafile)
235 f.seek(self.start(r))
237 f.seek(self.start(r))
236 data = f.read(self.length(r))
238 data = f.read(self.length(r))
237 return decompress(data)
239 return decompress(data)
238
240
239 def revision(self, node):
241 def revision(self, node):
240 if node == nullid: return ""
242 if node == nullid: return ""
241 if self.cache and self.cache[0] == node: return self.cache[2]
243 if self.cache and self.cache[0] == node: return self.cache[2]
242
244
243 text = None
245 text = None
244 rev = self.rev(node)
246 rev = self.rev(node)
245 start, length, base, link, p1, p2, node = self.index[rev]
247 start, length, base, link, p1, p2, node = self.index[rev]
246 end = start + length
248 end = start + length
247 if base != rev: start = self.start(base)
249 if base != rev: start = self.start(base)
248
250
249 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
251 if self.cache and self.cache[1] >= base and self.cache[1] < rev:
250 base = self.cache[1]
252 base = self.cache[1]
251 start = self.start(base + 1)
253 start = self.start(base + 1)
252 text = self.cache[2]
254 text = self.cache[2]
253 last = 0
255 last = 0
254
256
255 f = self.opener(self.datafile)
257 f = self.opener(self.datafile)
256 f.seek(start)
258 f.seek(start)
257 data = f.read(end - start)
259 data = f.read(end - start)
258
260
259 if text is None:
261 if text is None:
260 last = self.length(base)
262 last = self.length(base)
261 text = decompress(data[:last])
263 text = decompress(data[:last])
262
264
263 bins = []
265 bins = []
264 for r in xrange(base + 1, rev + 1):
266 for r in xrange(base + 1, rev + 1):
265 s = self.length(r)
267 s = self.length(r)
266 bins.append(decompress(data[last:last + s]))
268 bins.append(decompress(data[last:last + s]))
267 last = last + s
269 last = last + s
268
270
269 text = mdiff.patches(text, bins)
271 text = mdiff.patches(text, bins)
270
272
271 if node != hash(text, p1, p2):
273 if node != hash(text, p1, p2):
272 raise IOError("integrity check failed on %s:%d"
274 raise IOError("integrity check failed on %s:%d"
273 % (self.datafile, rev))
275 % (self.datafile, rev))
274
276
275 self.cache = (node, rev, text)
277 self.cache = (node, rev, text)
276 return text
278 return text
277
279
278 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
280 def addrevision(self, text, transaction, link, p1=None, p2=None, d=None):
279 if text is None: text = ""
281 if text is None: text = ""
280 if p1 is None: p1 = self.tip()
282 if p1 is None: p1 = self.tip()
281 if p2 is None: p2 = nullid
283 if p2 is None: p2 = nullid
282
284
283 node = hash(text, p1, p2)
285 node = hash(text, p1, p2)
284
286
285 if node in self.nodemap:
287 if node in self.nodemap:
286 return node
288 return node
287
289
288 n = self.count()
290 n = self.count()
289 t = n - 1
291 t = n - 1
290
292
291 if n:
293 if n:
292 base = self.base(t)
294 base = self.base(t)
293 start = self.start(base)
295 start = self.start(base)
294 end = self.end(t)
296 end = self.end(t)
295 if not d:
297 if not d:
296 prev = self.revision(self.tip())
298 prev = self.revision(self.tip())
297 d = self.diff(prev, text)
299 d = self.diff(prev, text)
298 data = compress(d)
300 data = compress(d)
299 dist = end - start + len(data)
301 dist = end - start + len(data)
300
302
301 # full versions are inserted when the needed deltas
303 # full versions are inserted when the needed deltas
302 # become comparable to the uncompressed text
304 # become comparable to the uncompressed text
303 if not n or dist > len(text) * 2:
305 if not n or dist > len(text) * 2:
304 data = compress(text)
306 data = compress(text)
305 base = n
307 base = n
306 else:
308 else:
307 base = self.base(t)
309 base = self.base(t)
308
310
309 offset = 0
311 offset = 0
310 if t >= 0:
312 if t >= 0:
311 offset = self.end(t)
313 offset = self.end(t)
312
314
313 e = (offset, len(data), base, link, p1, p2, node)
315 e = (offset, len(data), base, link, p1, p2, node)
314
316
315 self.index.append(e)
317 self.index.append(e)
316 self.nodemap[node] = n
318 self.nodemap[node] = n
317 entry = struct.pack(indexformat, *e)
319 entry = struct.pack(indexformat, *e)
318
320
319 transaction.add(self.datafile, e[0])
321 transaction.add(self.datafile, e[0])
320 self.opener(self.datafile, "a").write(data)
322 self.opener(self.datafile, "a").write(data)
321 transaction.add(self.indexfile, n * len(entry))
323 transaction.add(self.indexfile, n * len(entry))
322 self.opener(self.indexfile, "a").write(entry)
324 self.opener(self.indexfile, "a").write(entry)
323
325
324 self.cache = (node, n, text)
326 self.cache = (node, n, text)
325 return node
327 return node
326
328
327 def ancestor(self, a, b):
329 def ancestor(self, a, b):
328 # calculate the distance of every node from root
330 # calculate the distance of every node from root
329 dist = {nullid: 0}
331 dist = {nullid: 0}
330 for i in xrange(self.count()):
332 for i in xrange(self.count()):
331 n = self.node(i)
333 n = self.node(i)
332 p1, p2 = self.parents(n)
334 p1, p2 = self.parents(n)
333 dist[n] = max(dist[p1], dist[p2]) + 1
335 dist[n] = max(dist[p1], dist[p2]) + 1
334
336
335 # traverse ancestors in order of decreasing distance from root
337 # traverse ancestors in order of decreasing distance from root
336 def ancestors(node):
338 def ancestors(node):
337 # we store negative distances because heap returns smallest member
339 # we store negative distances because heap returns smallest member
338 h = [(-dist[node], node)]
340 h = [(-dist[node], node)]
339 seen = {}
341 seen = {}
340 earliest = self.count()
342 earliest = self.count()
341 while h:
343 while h:
342 d, n = heapq.heappop(h)
344 d, n = heapq.heappop(h)
343 if n not in seen:
345 if n not in seen:
344 seen[n] = 1
346 seen[n] = 1
345 r = self.rev(n)
347 r = self.rev(n)
346 yield (-d, r, n)
348 yield (-d, r, n)
347 for p in self.parents(n):
349 for p in self.parents(n):
348 heapq.heappush(h, (-dist[p], p))
350 heapq.heappush(h, (-dist[p], p))
349
351
350 x = ancestors(a)
352 x = ancestors(a)
351 y = ancestors(b)
353 y = ancestors(b)
352 lx = x.next()
354 lx = x.next()
353 ly = y.next()
355 ly = y.next()
354
356
355 # increment each ancestor list until it is closer to root than
357 # increment each ancestor list until it is closer to root than
356 # the other, or they match
358 # the other, or they match
357 while 1:
359 while 1:
358 if lx == ly:
360 if lx == ly:
359 return lx[2]
361 return lx[2]
360 elif lx < ly:
362 elif lx < ly:
361 ly = y.next()
363 ly = y.next()
362 elif lx > ly:
364 elif lx > ly:
363 lx = x.next()
365 lx = x.next()
364
366
365 def group(self, linkmap):
367 def group(self, linkmap):
366 # given a list of changeset revs, return a set of deltas and
368 # given a list of changeset revs, return a set of deltas and
367 # metadata corresponding to nodes. the first delta is
369 # metadata corresponding to nodes. the first delta is
368 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
370 # parent(nodes[0]) -> nodes[0] the receiver is guaranteed to
369 # have this parent as it has all history before these
371 # have this parent as it has all history before these
370 # changesets. parent is parent[0]
372 # changesets. parent is parent[0]
371
373
372 revs = []
374 revs = []
373 needed = {}
375 needed = {}
374
376
375 # find file nodes/revs that match changeset revs
377 # find file nodes/revs that match changeset revs
376 for i in xrange(0, self.count()):
378 for i in xrange(0, self.count()):
377 if self.index[i][3] in linkmap:
379 if self.index[i][3] in linkmap:
378 revs.append(i)
380 revs.append(i)
379 needed[i] = 1
381 needed[i] = 1
380
382
381 # if we don't have any revisions touched by these changesets, bail
383 # if we don't have any revisions touched by these changesets, bail
382 if not revs:
384 if not revs:
383 yield struct.pack(">l", 0)
385 yield struct.pack(">l", 0)
384 return
386 return
385
387
386 # add the parent of the first rev
388 # add the parent of the first rev
387 p = self.parents(self.node(revs[0]))[0]
389 p = self.parents(self.node(revs[0]))[0]
388 revs.insert(0, self.rev(p))
390 revs.insert(0, self.rev(p))
389
391
390 # for each delta that isn't contiguous in the log, we need to
392 # for each delta that isn't contiguous in the log, we need to
391 # reconstruct the base, reconstruct the result, and then
393 # reconstruct the base, reconstruct the result, and then
392 # calculate the delta. We also need to do this where we've
394 # calculate the delta. We also need to do this where we've
393 # stored a full version and not a delta
395 # stored a full version and not a delta
394 for i in xrange(0, len(revs) - 1):
396 for i in xrange(0, len(revs) - 1):
395 a, b = revs[i], revs[i + 1]
397 a, b = revs[i], revs[i + 1]
396 if a + 1 != b or self.base(b) == b:
398 if a + 1 != b or self.base(b) == b:
397 for j in xrange(self.base(a), a + 1):
399 for j in xrange(self.base(a), a + 1):
398 needed[j] = 1
400 needed[j] = 1
399 for j in xrange(self.base(b), b + 1):
401 for j in xrange(self.base(b), b + 1):
400 needed[j] = 1
402 needed[j] = 1
401
403
402 # calculate spans to retrieve from datafile
404 # calculate spans to retrieve from datafile
403 needed = needed.keys()
405 needed = needed.keys()
404 needed.sort()
406 needed.sort()
405 spans = []
407 spans = []
406 oo = -1
408 oo = -1
407 ol = 0
409 ol = 0
408 for n in needed:
410 for n in needed:
409 if n < 0: continue
411 if n < 0: continue
410 o = self.start(n)
412 o = self.start(n)
411 l = self.length(n)
413 l = self.length(n)
412 if oo + ol == o: # can we merge with the previous?
414 if oo + ol == o: # can we merge with the previous?
413 nl = spans[-1][2]
415 nl = spans[-1][2]
414 nl.append((n, l))
416 nl.append((n, l))
415 ol += l
417 ol += l
416 spans[-1] = (oo, ol, nl)
418 spans[-1] = (oo, ol, nl)
417 else:
419 else:
418 oo = o
420 oo = o
419 ol = l
421 ol = l
420 spans.append((oo, ol, [(n, l)]))
422 spans.append((oo, ol, [(n, l)]))
421
423
422 # read spans in, divide up chunks
424 # read spans in, divide up chunks
423 chunks = {}
425 chunks = {}
424 for span in spans:
426 for span in spans:
425 # we reopen the file for each span to make http happy for now
427 # we reopen the file for each span to make http happy for now
426 f = self.opener(self.datafile)
428 f = self.opener(self.datafile)
427 f.seek(span[0])
429 f.seek(span[0])
428 data = f.read(span[1])
430 data = f.read(span[1])
429
431
430 # divide up the span
432 # divide up the span
431 pos = 0
433 pos = 0
432 for r, l in span[2]:
434 for r, l in span[2]:
433 chunks[r] = decompress(data[pos: pos + l])
435 chunks[r] = decompress(data[pos: pos + l])
434 pos += l
436 pos += l
435
437
436 # helper to reconstruct intermediate versions
438 # helper to reconstruct intermediate versions
437 def construct(text, base, rev):
439 def construct(text, base, rev):
438 bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
440 bins = [chunks[r] for r in xrange(base + 1, rev + 1)]
439 return mdiff.patches(text, bins)
441 return mdiff.patches(text, bins)
440
442
441 # build deltas
443 # build deltas
442 deltas = []
444 deltas = []
443 for d in xrange(0, len(revs) - 1):
445 for d in xrange(0, len(revs) - 1):
444 a, b = revs[d], revs[d + 1]
446 a, b = revs[d], revs[d + 1]
445 n = self.node(b)
447 n = self.node(b)
446
448
447 # do we need to construct a new delta?
449 # do we need to construct a new delta?
448 if a + 1 != b or self.base(b) == b:
450 if a + 1 != b or self.base(b) == b:
449 if a >= 0:
451 if a >= 0:
450 base = self.base(a)
452 base = self.base(a)
451 ta = chunks[self.base(a)]
453 ta = chunks[self.base(a)]
452 ta = construct(ta, base, a)
454 ta = construct(ta, base, a)
453 else:
455 else:
454 ta = ""
456 ta = ""
455
457
456 base = self.base(b)
458 base = self.base(b)
457 if a > base:
459 if a > base:
458 base = a
460 base = a
459 tb = ta
461 tb = ta
460 else:
462 else:
461 tb = chunks[self.base(b)]
463 tb = chunks[self.base(b)]
462 tb = construct(tb, base, b)
464 tb = construct(tb, base, b)
463 d = self.diff(ta, tb)
465 d = self.diff(ta, tb)
464 else:
466 else:
465 d = chunks[b]
467 d = chunks[b]
466
468
467 p = self.parents(n)
469 p = self.parents(n)
468 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
470 meta = n + p[0] + p[1] + linkmap[self.linkrev(n)]
469 l = struct.pack(">l", len(meta) + len(d) + 4)
471 l = struct.pack(">l", len(meta) + len(d) + 4)
470 yield l
472 yield l
471 yield meta
473 yield meta
472 yield d
474 yield d
473
475
474 yield struct.pack(">l", 0)
476 yield struct.pack(">l", 0)
475
477
476 def addgroup(self, revs, linkmapper, transaction, unique=0):
478 def addgroup(self, revs, linkmapper, transaction, unique=0):
477 # given a set of deltas, add them to the revision log. the
479 # given a set of deltas, add them to the revision log. the
478 # first delta is against its parent, which should be in our
480 # first delta is against its parent, which should be in our
479 # log, the rest are against the previous delta.
481 # log, the rest are against the previous delta.
480
482
481 # track the base of the current delta log
483 # track the base of the current delta log
482 r = self.count()
484 r = self.count()
483 t = r - 1
485 t = r - 1
484 node = nullid
486 node = nullid
485
487
486 base = prev = -1
488 base = prev = -1
487 start = end = measure = 0
489 start = end = measure = 0
488 if r:
490 if r:
489 start = self.start(self.base(t))
491 start = self.start(self.base(t))
490 end = self.end(t)
492 end = self.end(t)
491 measure = self.length(self.base(t))
493 measure = self.length(self.base(t))
492 base = self.base(t)
494 base = self.base(t)
493 prev = self.tip()
495 prev = self.tip()
494
496
495 transaction.add(self.datafile, end)
497 transaction.add(self.datafile, end)
496 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
498 transaction.add(self.indexfile, r * struct.calcsize(indexformat))
497 dfh = self.opener(self.datafile, "a")
499 dfh = self.opener(self.datafile, "a")
498 ifh = self.opener(self.indexfile, "a")
500 ifh = self.opener(self.indexfile, "a")
499
501
500 # loop through our set of deltas
502 # loop through our set of deltas
501 chain = None
503 chain = None
502 for chunk in revs:
504 for chunk in revs:
503 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
505 node, p1, p2, cs = struct.unpack("20s20s20s20s", chunk[:80])
504 link = linkmapper(cs)
506 link = linkmapper(cs)
505 if node in self.nodemap:
507 if node in self.nodemap:
506 # this can happen if two branches make the same change
508 # this can happen if two branches make the same change
507 if unique:
509 if unique:
508 raise "already have %s" % hex(node[:4])
510 raise RevlogError("already have %s" % hex(node[:4]))
509 chain = node
511 chain = node
510 continue
512 continue
511 delta = chunk[80:]
513 delta = chunk[80:]
512
514
513 if not chain:
515 if not chain:
514 # retrieve the parent revision of the delta chain
516 # retrieve the parent revision of the delta chain
515 chain = p1
517 chain = p1
516 if not chain in self.nodemap:
518 if not chain in self.nodemap:
517 raise "unknown base %s" % short(chain[:4])
519 raise RevlogError("unknown base %s" % short(chain[:4]))
518
520
519 # full versions are inserted when the needed deltas become
521 # full versions are inserted when the needed deltas become
520 # comparable to the uncompressed text or when the previous
522 # comparable to the uncompressed text or when the previous
521 # version is not the one we have a delta against. We use
523 # version is not the one we have a delta against. We use
522 # the size of the previous full rev as a proxy for the
524 # the size of the previous full rev as a proxy for the
523 # current size.
525 # current size.
524
526
525 if chain == prev:
527 if chain == prev:
526 cdelta = compress(delta)
528 cdelta = compress(delta)
527
529
528 if chain != prev or (end - start + len(cdelta)) > measure * 2:
530 if chain != prev or (end - start + len(cdelta)) > measure * 2:
529 # flush our writes here so we can read it in revision
531 # flush our writes here so we can read it in revision
530 dfh.flush()
532 dfh.flush()
531 ifh.flush()
533 ifh.flush()
532 text = self.revision(chain)
534 text = self.revision(chain)
533 text = self.patches(text, [delta])
535 text = self.patches(text, [delta])
534 chk = self.addrevision(text, transaction, link, p1, p2)
536 chk = self.addrevision(text, transaction, link, p1, p2)
535 if chk != node:
537 if chk != node:
536 raise "consistency error adding group"
538 raise RevlogError("consistency error adding group")
537 measure = len(text)
539 measure = len(text)
538 else:
540 else:
539 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
541 e = (end, len(cdelta), self.base(t), link, p1, p2, node)
540 self.index.append(e)
542 self.index.append(e)
541 self.nodemap[node] = r
543 self.nodemap[node] = r
542 dfh.write(cdelta)
544 dfh.write(cdelta)
543 ifh.write(struct.pack(indexformat, *e))
545 ifh.write(struct.pack(indexformat, *e))
544
546
545 t, r, chain, prev = r, r + 1, node, node
547 t, r, chain, prev = r, r + 1, node, node
546 start = self.start(self.base(t))
548 start = self.start(self.base(t))
547 end = self.end(t)
549 end = self.end(t)
548
550
549 dfh.close()
551 dfh.close()
550 ifh.close()
552 ifh.close()
551 return node
553 return node
@@ -1,78 +1,78 b''
1 # transaction.py - simple journalling scheme for mercurial
1 # transaction.py - simple journalling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms
11 # This software may be used and distributed according to the terms
12 # of the GNU General Public License, incorporated herein by reference.
12 # of the GNU General Public License, incorporated herein by reference.
13
13
14 import os
14 import os
15 import util
15 import util
16
16
17 class transaction:
17 class transaction:
18 def __init__(self, report, opener, journal, after=None):
18 def __init__(self, report, opener, journal, after=None):
19 self.journal = None
19 self.journal = None
20
20
21 # abort here if the journal already exists
21 # abort here if the journal already exists
22 if os.path.exists(journal):
22 if os.path.exists(journal):
23 raise "journal already exists - run hg recover"
23 raise AssertionError("journal already exists - run hg recover")
24
24
25 self.report = report
25 self.report = report
26 self.opener = opener
26 self.opener = opener
27 self.after = after
27 self.after = after
28 self.entries = []
28 self.entries = []
29 self.map = {}
29 self.map = {}
30 self.journal = journal
30 self.journal = journal
31
31
32 self.file = open(self.journal, "w")
32 self.file = open(self.journal, "w")
33
33
34 def __del__(self):
34 def __del__(self):
35 if self.journal:
35 if self.journal:
36 if self.entries: self.abort()
36 if self.entries: self.abort()
37 self.file.close()
37 self.file.close()
38 try: os.unlink(self.journal)
38 try: os.unlink(self.journal)
39 except: pass
39 except: pass
40
40
41 def add(self, file, offset):
41 def add(self, file, offset):
42 if file in self.map: return
42 if file in self.map: return
43 self.entries.append((file, offset))
43 self.entries.append((file, offset))
44 self.map[file] = 1
44 self.map[file] = 1
45 # add enough data to the journal to do the truncate
45 # add enough data to the journal to do the truncate
46 self.file.write("%s\0%d\n" % (file, offset))
46 self.file.write("%s\0%d\n" % (file, offset))
47 self.file.flush()
47 self.file.flush()
48
48
49 def close(self):
49 def close(self):
50 self.file.close()
50 self.file.close()
51 self.entries = []
51 self.entries = []
52 if self.after:
52 if self.after:
53 self.after()
53 self.after()
54 else:
54 else:
55 os.unlink(self.journal)
55 os.unlink(self.journal)
56 self.journal = None
56 self.journal = None
57
57
58 def abort(self):
58 def abort(self):
59 if not self.entries: return
59 if not self.entries: return
60
60
61 self.report("transaction abort!\n")
61 self.report("transaction abort!\n")
62
62
63 for f, o in self.entries:
63 for f, o in self.entries:
64 try:
64 try:
65 self.opener(f, "a").truncate(o)
65 self.opener(f, "a").truncate(o)
66 except:
66 except:
67 self.report("failed to truncate %s\n" % f)
67 self.report("failed to truncate %s\n" % f)
68
68
69 self.entries = []
69 self.entries = []
70
70
71 self.report("rollback completed\n")
71 self.report("rollback completed\n")
72
72
73 def rollback(opener, file):
73 def rollback(opener, file):
74 for l in open(file).readlines():
74 for l in open(file).readlines():
75 f, o = l.split('\0')
75 f, o = l.split('\0')
76 opener(f, "a").truncate(int(o))
76 opener(f, "a").truncate(int(o))
77 os.unlink(file)
77 os.unlink(file)
78
78
General Comments 0
You need to be logged in to leave comments. Login now