@@ -1,1082 +1,1088 @@
 # hgweb/hgweb_mod.py - Web interface for a repository.
 #
 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
 #
 # This software may be used and distributed according to the terms
 # of the GNU General Public License, incorporated herein by reference.

 import os
 import os.path
 import mimetypes
 from mercurial.demandload import demandload
 demandload(globals(), "re zlib ConfigParser mimetools cStringIO sys tempfile")
 demandload(globals(), 'urllib')
 demandload(globals(), "mercurial:mdiff,ui,hg,util,archival,streamclone,patch")
 demandload(globals(), "mercurial:revlog,templater")
 demandload(globals(), "mercurial.hgweb.common:get_mtime,staticfile,style_map")
 from mercurial.node import *
 from mercurial.i18n import gettext as _

 def _up(p):
     if p[0] != "/":
         p = "/" + p
     if p[-1] == "/":
         p = p[:-1]
     up = os.path.dirname(p)
     if up == "/":
         return "/"
     return up + "/"

 def revnavgen(pos, pagelen, limit, nodefunc):
     def seq(factor, limit=None):
         if limit:
             yield limit
             if limit >= 20 and limit <= 40:
                 yield 50
         else:
             yield 1 * factor
             yield 3 * factor
         for f in seq(factor * 10):
             yield f

     def nav(**map):
         l = []
         last = 0
         for f in seq(1, pagelen):
             if f < pagelen or f <= last:
                 continue
             if f > limit:
                 break
             last = f
             if pos + f < limit:
                 l.append(("+%d" % f, hex(nodefunc(pos + f).node())))
             if pos - f >= 0:
                 l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node())))

         try:
             yield {"label": "(0)", "node": hex(nodefunc('0').node())}

             for label, node in l:
                 yield {"label": label, "node": node}

             yield {"label": "tip", "node": "tip"}
         except hg.RepoError:
             pass

     return nav

 class hgweb(object):
     def __init__(self, repo, name=None):
         if type(repo) == type(""):
             self.repo = hg.repository(ui.ui(), repo)
         else:
             self.repo = repo

         self.mtime = -1
         self.reponame = name
         self.archives = 'zip', 'gz', 'bz2'
         self.stripecount = 1
         self.templatepath = self.repo.ui.config("web", "templates",
                                                 templater.templatepath())

     def refresh(self):
         mtime = get_mtime(self.repo.root)
         if mtime != self.mtime:
             self.mtime = mtime
             self.repo = hg.repository(self.repo.ui, self.repo.root)
             self.maxchanges = int(self.repo.ui.config("web", "maxchanges", 10))
             self.stripecount = int(self.repo.ui.config("web", "stripes", 1))
             self.maxshortchanges = int(self.repo.ui.config("web", "maxshortchanges", 60))
             self.maxfiles = int(self.repo.ui.config("web", "maxfiles", 10))
             self.allowpull = self.repo.ui.configbool("web", "allowpull", True)

     def archivelist(self, nodeid):
         allowed = self.repo.ui.configlist("web", "allow_archive")
         for i, spec in self.archive_specs.iteritems():
             if i in allowed or self.repo.ui.configbool("web", "allow" + i):
                 yield {"type" : i, "extension" : spec[2], "node" : nodeid}

     def listfilediffs(self, files, changeset):
         for f in files[:self.maxfiles]:
             yield self.t("filedifflink", node=hex(changeset), file=f)
         if len(files) > self.maxfiles:
             yield self.t("fileellipses")

     def siblings(self, siblings=[], hiderev=None, **args):
         siblings = [s for s in siblings if s.node() != nullid]
         if len(siblings) == 1 and siblings[0].rev() == hiderev:
             return
         for s in siblings:
             d = {'node': hex(s.node()), 'rev': s.rev()}
             if hasattr(s, 'path'):
                 d['file'] = s.path()
             d.update(args)
             yield d

     def renamelink(self, fl, node):
         r = fl.renamed(node)
         if r:
             return [dict(file=r[0], node=hex(r[1]))]
         return []

     def showtag(self, t1, node=nullid, **args):
         for t in self.repo.nodetags(node):
             yield self.t(t1, tag=t, **args)

     def diff(self, node1, node2, files):
         def filterfiles(filters, files):
             l = [x for x in files if x in filters]

             for t in filters:
                 if t and t[-1] != os.sep:
                     t += os.sep
                 l += [x for x in files if x.startswith(t)]
             return l

         parity = [0]
         def diffblock(diff, f, fn):
             yield self.t("diffblock",
                          lines=prettyprintlines(diff),
                          parity=parity[0],
                          file=f,
                          filenode=hex(fn or nullid))
             parity[0] = 1 - parity[0]

         def prettyprintlines(diff):
             for l in diff.splitlines(1):
                 if l.startswith('+'):
                     yield self.t("difflineplus", line=l)
                 elif l.startswith('-'):
                     yield self.t("difflineminus", line=l)
                 elif l.startswith('@'):
                     yield self.t("difflineat", line=l)
                 else:
                     yield self.t("diffline", line=l)

         r = self.repo
         cl = r.changelog
         mf = r.manifest
         change1 = cl.read(node1)
         change2 = cl.read(node2)
         mmap1 = mf.read(change1[0])
         mmap2 = mf.read(change2[0])
         date1 = util.datestr(change1[2])
         date2 = util.datestr(change2[2])

         modified, added, removed, deleted, unknown = r.status(node1, node2)[:5]
         if files:
             modified, added, removed = map(lambda x: filterfiles(files, x),
                                            (modified, added, removed))

         diffopts = patch.diffopts(self.repo.ui)
         for f in modified:
             to = r.file(f).read(mmap1[f])
             tn = r.file(f).read(mmap2[f])
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)
         for f in added:
             to = None
             tn = r.file(f).read(mmap2[f])
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)
         for f in removed:
             to = r.file(f).read(mmap1[f])
             tn = None
             yield diffblock(mdiff.unidiff(to, date1, tn, date2, f,
                                           opts=diffopts), f, tn)

     def changelog(self, ctx, shortlog=False):
         def changelist(**map):
             parity = (start - end) & 1
             cl = self.repo.changelog
             l = [] # build a list in forward order for efficiency
             for i in range(start, end):
                 ctx = self.repo.changectx(i)
                 n = ctx.node()

                 l.insert(0, {"parity": parity,
                              "author": ctx.user(),
                              "parent": self.siblings(ctx.parents(), i - 1),
                              "child": self.siblings(ctx.children(), i + 1),
                              "changelogtag": self.showtag("changelogtag",n),
                              "desc": ctx.description(),
                              "date": ctx.date(),
                              "files": self.listfilediffs(ctx.files(), n),
                              "rev": i,
                              "node": hex(n)})
                 parity = 1 - parity

             for e in l:
                 yield e

         maxchanges = shortlog and self.maxshortchanges or self.maxchanges
         cl = self.repo.changelog
         count = cl.count()
         pos = ctx.rev()
         start = max(0, pos - maxchanges + 1)
         end = min(count, start + maxchanges)
         pos = end - 1

         changenav = revnavgen(pos, maxchanges, count, self.repo.changectx)

         yield self.t(shortlog and 'shortlog' or 'changelog',
                      changenav=changenav,
                      node=hex(cl.tip()),
                      rev=pos, changesets=count, entries=changelist,
                      archives=self.archivelist("tip"))

     def search(self, query):

         def changelist(**map):
             cl = self.repo.changelog
             count = 0
             qw = query.lower().split()

             def revgen():
                 for i in range(cl.count() - 1, 0, -100):
                     l = []
                     for j in range(max(0, i - 100), i):
                         ctx = self.repo.changectx(j)
                         l.append(ctx)
                     l.reverse()
                     for e in l:
                         yield e

             for ctx in revgen():
                 miss = 0
                 for q in qw:
                     if not (q in ctx.user().lower() or
                             q in ctx.description().lower() or
                             q in " ".join(ctx.files()[:20]).lower()):
                         miss = 1
                         break
                 if miss:
                     continue

                 count += 1
                 n = ctx.node()

                 yield self.t('searchentry',
                              parity=self.stripes(count),
                              author=ctx.user(),
                              parent=self.siblings(ctx.parents()),
                              child=self.siblings(ctx.children()),
                              changelogtag=self.showtag("changelogtag",n),
                              desc=ctx.description(),
                              date=ctx.date(),
                              files=self.listfilediffs(ctx.files(), n),
                              rev=ctx.rev(),
                              node=hex(n))

                 if count >= self.maxchanges:
                     break

         cl = self.repo.changelog

         yield self.t('search',
                      query=query,
                      node=hex(cl.tip()),
                      entries=changelist)

     def changeset(self, ctx):
         n = ctx.node()
         parents = ctx.parents()
         p1 = parents[0].node()

         files = []
         parity = 0
         for f in ctx.files():
             files.append(self.t("filenodelink",
                                 node=hex(n), file=f,
                                 parity=parity))
             parity = 1 - parity

         def diff(**map):
             yield self.diff(p1, n, None)

         yield self.t('changeset',
                      diff=diff,
                      rev=ctx.rev(),
                      node=hex(n),
                      parent=self.siblings(parents),
                      child=self.siblings(ctx.children()),
                      changesettag=self.showtag("changesettag",n),
                      author=ctx.user(),
                      desc=ctx.description(),
                      date=ctx.date(),
                      files=files,
                      archives=self.archivelist(hex(n)))

     def filelog(self, fctx):
         f = fctx.path()
         fl = fctx.filelog()
         count = fl.count()
         pagelen = self.maxshortchanges
         pos = fctx.filerev()
         start = max(0, pos - pagelen + 1)
         end = min(count, start + pagelen)
         pos = end - 1

         def entries(**map):
             l = []
             parity = (count - 1) & 1

             for i in range(start, end):
                 ctx = fctx.filectx(i)
                 n = fl.node(i)

                 l.insert(0, {"parity": parity,
                              "filerev": i,
                              "file": f,
                              "node": hex(ctx.node()),
                              "author": ctx.user(),
                              "date": ctx.date(),
                              "rename": self.renamelink(fl, n),
                              "parent": self.siblings(fctx.parents()),
                              "child": self.siblings(fctx.children()),
                              "desc": ctx.description()})
                 parity = 1 - parity

             for e in l:
                 yield e

         nodefunc = lambda x: fctx.filectx(fileid=x)
         nav = revnavgen(pos, pagelen, count, nodefunc)
         yield self.t("filelog", file=f, node=hex(fctx.node()), nav=nav,
                      entries=entries)

     def filerevision(self, fctx):
         f = fctx.path()
         text = fctx.data()
         fl = fctx.filelog()
         n = fctx.filenode()

         mt = mimetypes.guess_type(f)[0]
         rawtext = text
         if util.binary(text):
             mt = mt or 'application/octet-stream'
             text = "(binary:%s)" % mt
         mt = mt or 'text/plain'

         def lines():
             for l, t in enumerate(text.splitlines(1)):
                 yield {"line": t,
                        "linenumber": "% 6d" % (l + 1),
                        "parity": self.stripes(l)}

         yield self.t("filerevision",
                      file=f,
                      path=_up(f),
                      text=lines(),
                      raw=rawtext,
                      mimetype=mt,
                      rev=fctx.rev(),
                      node=hex(fctx.node()),
                      author=fctx.user(),
                      date=fctx.date(),
                      desc=fctx.description(),
                      parent=self.siblings(fctx.parents()),
                      child=self.siblings(fctx.children()),
                      rename=self.renamelink(fl, n),
                      permissions=fctx.manifest().execf(f))

     def fileannotate(self, fctx):
         f = fctx.path()
         n = fctx.filenode()
         fl = fctx.filelog()

         def annotate(**map):
             parity = 0
             last = None
             for f, l in fctx.annotate(follow=True):
                 fnode = f.filenode()
                 name = self.repo.ui.shortuser(f.user())

                 if last != fnode:
                     parity = 1 - parity
                     last = fnode

                 yield {"parity": parity,
                        "node": hex(f.node()),
                        "rev": f.rev(),
                        "author": name,
                        "file": f.path(),
                        "line": l}

         yield self.t("fileannotate",
                      file=f,
                      annotate=annotate,
                      path=_up(f),
                      rev=fctx.rev(),
                      node=hex(fctx.node()),
                      author=fctx.user(),
                      date=fctx.date(),
                      desc=fctx.description(),
                      rename=self.renamelink(fl, n),
                      parent=self.siblings(fctx.parents()),
                      child=self.siblings(fctx.children()),
                      permissions=fctx.manifest().execf(f))

     def manifest(self, ctx, path):
         mf = ctx.manifest()
         node = ctx.node()

         files = {}

         p = path[1:]
         if p and p[-1] != "/":
             p += "/"
         l = len(p)

         for f,n in mf.items():
             if f[:l] != p:
                 continue
             remain = f[l:]
             if "/" in remain:
                 short = remain[:remain.index("/") + 1] # bleah
                 files[short] = (f, None)
             else:
                 short = os.path.basename(remain)
                 files[short] = (f, n)

         def filelist(**map):
             parity = 0
             fl = files.keys()
             fl.sort()
             for f in fl:
                 full, fnode = files[f]
                 if not fnode:
                     continue

                 yield {"file": full,
                        "parity": self.stripes(parity),
                        "basename": f,
                        "size": ctx.filectx(full).size(),
                        "permissions": mf.execf(full)}
                 parity += 1

         def dirlist(**map):
             parity = 0
             fl = files.keys()
             fl.sort()
             for f in fl:
                 full, fnode = files[f]
                 if fnode:
                     continue

                 yield {"parity": self.stripes(parity),
                        "path": os.path.join(path, f),
                        "basename": f[:-1]}
                 parity += 1

         yield self.t("manifest",
                      rev=ctx.rev(),
                      node=hex(node),
                      path=path,
                      up=_up(path),
                      fentries=filelist,
                      dentries=dirlist,
                      archives=self.archivelist(hex(node)))

     def tags(self):
         cl = self.repo.changelog

         i = self.repo.tagslist()
         i.reverse()

         def entries(notip=False, **map):
             parity = 0
             for k,n in i:
                 if notip and k == "tip": continue
                 yield {"parity": self.stripes(parity),
                        "tag": k,
                        "date": cl.read(n)[2],
                        "node": hex(n)}
                 parity += 1

         yield self.t("tags",
                      node=hex(self.repo.changelog.tip()),
                      entries=lambda **x: entries(False, **x),
                      entriesnotip=lambda **x: entries(True, **x))

     def summary(self):
         cl = self.repo.changelog

         i = self.repo.tagslist()
         i.reverse()

         def tagentries(**map):
             parity = 0
             count = 0
             for k,n in i:
                 if k == "tip": # skip tip
                     continue;

                 count += 1
                 if count > 10: # limit to 10 tags
                     break;

                 c = cl.read(n)
                 t = c[2]

                 yield self.t("tagentry",
                              parity = self.stripes(parity),
                              tag = k,
                              node = hex(n),
                              date = t)
                 parity += 1

         def changelist(**map):
             parity = 0
             cl = self.repo.changelog
             l = [] # build a list in forward order for efficiency
             for i in range(start, end):
                 n = cl.node(i)
                 changes = cl.read(n)
                 hn = hex(n)
                 t = changes[2]

                 l.insert(0, self.t(
                     'shortlogentry',
                     parity = parity,
                     author = changes[1],
                     desc = changes[4],
                     date = t,
                     rev = i,
                     node = hn))
                 parity = 1 - parity

             yield l

         count = cl.count()
         start = max(0, count - self.maxchanges)
         end = min(count, start + self.maxchanges)

         yield self.t("summary",
                      desc = self.repo.ui.config("web", "description", "unknown"),
                      owner = (self.repo.ui.config("ui", "username") or # preferred
                               self.repo.ui.config("web", "contact") or # deprecated
                               self.repo.ui.config("web", "author", "unknown")), # also
                      lastchange = cl.read(cl.tip())[2],
                      tags = tagentries,
                      shortlog = changelist,
                      node = hex(cl.tip()),
                      archives=self.archivelist("tip"))

     def filediff(self, fctx):
         n = fctx.node()
         path = fctx.path()
         parents = fctx.parents()
         p1 = parents and parents[0].node() or nullid

         def diff(**map):
             yield self.diff(p1, n, [path])

         yield self.t("filediff",
                      file=path,
                      node=hex(n),
                      rev=fctx.rev(),
                      parent=self.siblings(parents),
                      child=self.siblings(fctx.children()),
                      diff=diff)

     archive_specs = {
         'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
         'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
         'zip': ('application/zip', 'zip', '.zip', None),
         }

     def archive(self, req, cnode, type_):
         reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame))
         name = "%s-%s" % (reponame, short(cnode))
         mimetype, artype, extension, encoding = self.archive_specs[type_]
         headers = [('Content-type', mimetype),
                    ('Content-disposition', 'attachment; filename=%s%s' %
                     (name, extension))]
         if encoding:
             headers.append(('Content-encoding', encoding))
         req.header(headers)
         archival.archive(self.repo, req.out, cnode, artype, prefix=name)

     # add tags to things
     # tags -> list of changesets corresponding to tags
     # find tag, changeset, file

     def cleanpath(self, path):
         return util.canonpath(self.repo.root, '', path)

     def run(self):
         if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."):
             raise RuntimeError("This function is only intended to be called while running as a CGI script.")
         import mercurial.hgweb.wsgicgi as wsgicgi
         from request import wsgiapplication
         def make_web_app():
             return self
         wsgicgi.launch(wsgiapplication(make_web_app))

     def run_wsgi(self, req):
         def header(**map):
             header_file = cStringIO.StringIO(''.join(self.t("header", **map)))
             msg = mimetools.Message(header_file, 0)
             req.header(msg.items())
             yield header_file.read()

         def rawfileheader(**map):
             req.header([('Content-type', map['mimetype']),
                         ('Content-disposition', 'filename=%s' % map['file']),
                         ('Content-length', str(len(map['raw'])))])
             yield ''

         def footer(**map):
             yield self.t("footer",
                          motd=self.repo.ui.config("web", "motd", ""),
                          **map)

         def expand_form(form):
             shortcuts = {
                 'cl': [('cmd', ['changelog']), ('rev', None)],
                 'sl': [('cmd', ['shortlog']), ('rev', None)],
                 'cs': [('cmd', ['changeset']), ('node', None)],
                 'f': [('cmd', ['file']), ('filenode', None)],
                 'fl': [('cmd', ['filelog']), ('filenode', None)],
                 'fd': [('cmd', ['filediff']), ('node', None)],
                 'fa': [('cmd', ['annotate']), ('filenode', None)],
                 'mf': [('cmd', ['manifest']), ('manifest', None)],
                 'ca': [('cmd', ['archive']), ('node', None)],
                 'tags': [('cmd', ['tags'])],
                 'tip': [('cmd', ['changeset']), ('node', ['tip'])],
                 'static': [('cmd', ['static']), ('file', None)]
             }

             for k in shortcuts.iterkeys():
                 if form.has_key(k):
                     for name, value in shortcuts[k]:
                         if value is None:
                             value = form[k]
                         form[name] = value
                     del form[k]

         def rewrite_request(req):
             '''translate new web interface to traditional format'''

             def spliturl(req):
                 def firstitem(query):
                     return query.split('&', 1)[0].split(';', 1)[0]

                 def normurl(url):
                     inner = '/'.join([x for x in url.split('/') if x])
                     tl = len(url) > 1 and url.endswith('/') and '/' or ''

                     return '%s%s%s' % (url.startswith('/') and '/' or '',
                                        inner, tl)

                 root = normurl(req.env.get('REQUEST_URI', '').split('?', 1)[0])
                 pi = normurl(req.env.get('PATH_INFO', ''))
                 if pi:
                     # strip leading /
                     pi = pi[1:]
                     if pi:
                         root = root[:-len(pi)]
                     if req.env.has_key('REPO_NAME'):
                         rn = req.env['REPO_NAME'] + '/'
                         root += rn
                         query = pi[len(rn):]
                     else:
                         query = pi
                 else:
                     root += '?'
                     query = firstitem(req.env['QUERY_STRING'])

                 return (root, query)

             req.url, query = spliturl(req)

             if req.form.has_key('cmd'):
                 # old style
                 return

             args = query.split('/', 2)
             if not args or not args[0]:
                 return

             cmd = args.pop(0)
             style = cmd.rfind('-')
             if style != -1:
                 req.form['style'] = [cmd[:style]]
                 cmd = cmd[style+1:]
             # avoid accepting e.g. style parameter as command
             if hasattr(self, 'do_' + cmd):
                 req.form['cmd'] = [cmd]

             if args and args[0]:
                 node = args.pop(0)
                 req.form['node'] = [node]
             if args:
                 req.form['file'] = args

             if cmd == 'static':
                 req.form['file'] = req.form['node']
             elif cmd == 'archive':
                 fn = req.form['node'][0]
                 for type_, spec in self.archive_specs.iteritems():
                     ext = spec[2]
                     if fn.endswith(ext):
                         req.form['node'] = [fn[:-len(ext)]]
                         req.form['type'] = [type_]

         def sessionvars(**map):
             fields = []
             if req.form.has_key('style'):
                 style = req.form['style'][0]
                 if style != self.repo.ui.config('web', 'style', ''):
                     fields.append(('style', style))

             separator = req.url[-1] == '?' and ';' or '?'
             for name, value in fields:
                 yield dict(name=name, value=value, separator=separator)
                 separator = ';'

         self.refresh()

         expand_form(req.form)
         rewrite_request(req)

         style = self.repo.ui.config("web", "style", "")
         if req.form.has_key('style'):
             style = req.form['style'][0]
         mapfile = style_map(self.templatepath, style)

         port = req.env["SERVER_PORT"]
         port = port != "80" and (":" + port) or ""
         urlbase = 'http://%s%s' % (req.env['SERVER_NAME'], port)

         if not self.reponame:
             self.reponame = (self.repo.ui.config("web", "name")
                              or req.env.get('REPO_NAME')
                              or req.url.strip('/') or self.repo.root)

         self.t = templater.templater(mapfile, templater.common_filters,
                                      defaults={"url": req.url,
                                                "urlbase": urlbase,
                                                "repo": self.reponame,
                                                "header": header,
                                                "footer": footer,
                                                "rawfileheader": rawfileheader,
                                                "sessionvars": sessionvars
                                                })

         if not req.form.has_key('cmd'):
             req.form['cmd'] = [self.t.cache['default'],]

         cmd = req.form['cmd'][0]

         method = getattr(self, 'do_' + cmd, None)
         if method:
             try:
                 method(req)
             except (hg.RepoError, revlog.RevlogError), inst:
                 req.write(self.t("error", error=str(inst)))
         else:
             req.write(self.t("error", error='No such method: ' + cmd))

     def changectx(self, req):
         if req.form.has_key('node'):
             changeid = req.form['node'][0]
         elif req.form.has_key('manifest'):
             changeid = req.form['manifest'][0]
         else:
             changeid = self.repo.changelog.count() - 1

         try:
             ctx = self.repo.changectx(changeid)
         except hg.RepoError:
             man = self.repo.manifest
             mn = man.lookup(changeid)
             ctx = self.repo.changectx(man.linkrev(mn))

         return ctx

     def filectx(self, req):
         path = self.cleanpath(req.form['file'][0])
         if req.form.has_key('node'):
             changeid = req.form['node'][0]
         else:
             changeid = req.form['filenode'][0]
         try:
             ctx = self.repo.changectx(changeid)
             fctx = ctx.filectx(path)
         except hg.RepoError:
             fctx = self.repo.filectx(path, fileid=changeid)

         return fctx

     def stripes(self, parity):
         "make horizontal stripes for easier reading"
         if self.stripecount:
             return (1 + parity / self.stripecount) & 1
         else:
             return 0

     def do_log(self, req):
         if req.form.has_key('file') and req.form['file'][0]:
             self.do_filelog(req)
         else:
             self.do_changelog(req)

     def do_rev(self, req):
         self.do_changeset(req)

     def do_file(self, req):
         path = req.form.get('file', [''])[0]
         if path:
             try:
                 req.write(self.filerevision(self.filectx(req)))
                 return
             except hg.RepoError:
                 pass
             path = self.cleanpath(path)

         req.write(self.manifest(self.changectx(req), '/' + path))

     def do_diff(self, req):
         self.do_filediff(req)

     def do_changelog(self, req, shortlog = False):
         if req.form.has_key('node'):
             ctx = self.changectx(req)
         else:
             if req.form.has_key('rev'):
                 hi = req.form['rev'][0]
             else:
                 hi = self.repo.changelog.count() - 1
             try:
                 ctx = self.repo.changectx(hi)
             except hg.RepoError:
                 req.write(self.search(hi)) # XXX redirect to 404 page?
                 return

         req.write(self.changelog(ctx, shortlog = shortlog))

     def do_shortlog(self, req):
         self.do_changelog(req, shortlog = True)

     def do_changeset(self, req):
         req.write(self.changeset(self.changectx(req)))

     def do_manifest(self, req):
         req.write(self.manifest(self.changectx(req),
                                 self.cleanpath(req.form['path'][0])))

     def do_tags(self, req):
         req.write(self.tags())

     def do_summary(self, req):
         req.write(self.summary())

     def do_filediff(self, req):
         req.write(self.filediff(self.filectx(req)))

     def do_annotate(self, req):
         req.write(self.fileannotate(self.filectx(req)))

     def do_filelog(self, req):
         req.write(self.filelog(self.filectx(req)))

886 | def do_lookup(self, req): |
|
886 | def do_lookup(self, req): | |
887 | resp = hex(self.repo.lookup(req.form['key'][0])) + "\n" |
|
887 | try: | |
|
888 | r = hex(self.repo.lookup(req.form['key'][0])) | |||
|
889 | success = 1 | |||
|
890 | except Exception,inst: | |||
|
891 | r = str(inst) | |||
|
892 | success = 0 | |||
|
893 | resp = "%s %s\n" % (success, r) | |||
888 | req.httphdr("application/mercurial-0.1", length=len(resp)) |
|
894 | req.httphdr("application/mercurial-0.1", length=len(resp)) | |
889 | req.write(resp) |
|
895 | req.write(resp) | |
890 |
|
896 | |||
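The new do_lookup handler above changes the wire response from a bare hex node to a single "<success> <payload>" line, so lookup failures are reported to the client instead of aborting the request. A minimal standalone sketch of that response format follows; the function name and the plain dict standing in for repo.lookup() are illustrative only, not part of the patch.

def lookup_response(nodes, key):
    # mirrors the new do_lookup body: "1 <hex node>\n" on success,
    # "0 <error text>\n" when the name cannot be resolved
    try:
        r = nodes[key]          # repo.lookup() raises on unknown names
        success = 1
    except KeyError:
        r = "unknown revision '%s'" % key
        success = 0
    return "%s %s\n" % (success, r)

# lookup_response({'tip': '0' * 40}, 'tip')   == "1 " + "0" * 40 + "\n"
# lookup_response({'tip': '0' * 40}, 'bogus') == "0 unknown revision 'bogus'\n"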
891 | def do_heads(self, req): |
|
897 | def do_heads(self, req): | |
892 | resp = " ".join(map(hex, self.repo.heads())) + "\n" |
|
898 | resp = " ".join(map(hex, self.repo.heads())) + "\n" | |
893 | req.httphdr("application/mercurial-0.1", length=len(resp)) |
|
899 | req.httphdr("application/mercurial-0.1", length=len(resp)) | |
894 | req.write(resp) |
|
900 | req.write(resp) | |
895 |
|
901 | |||
896 | def do_branches(self, req): |
|
902 | def do_branches(self, req): | |
897 | nodes = [] |
|
903 | nodes = [] | |
898 | if req.form.has_key('nodes'): |
|
904 | if req.form.has_key('nodes'): | |
899 | nodes = map(bin, req.form['nodes'][0].split(" ")) |
|
905 | nodes = map(bin, req.form['nodes'][0].split(" ")) | |
900 | resp = cStringIO.StringIO() |
|
906 | resp = cStringIO.StringIO() | |
901 | for b in self.repo.branches(nodes): |
|
907 | for b in self.repo.branches(nodes): | |
902 | resp.write(" ".join(map(hex, b)) + "\n") |
|
908 | resp.write(" ".join(map(hex, b)) + "\n") | |
903 | resp = resp.getvalue() |
|
909 | resp = resp.getvalue() | |
904 | req.httphdr("application/mercurial-0.1", length=len(resp)) |
|
910 | req.httphdr("application/mercurial-0.1", length=len(resp)) | |
905 | req.write(resp) |
|
911 | req.write(resp) | |
906 |
|
912 | |||
907 | def do_between(self, req): |
|
913 | def do_between(self, req): | |
908 | if req.form.has_key('pairs'): |
|
914 | if req.form.has_key('pairs'): | |
909 | pairs = [map(bin, p.split("-")) |
|
915 | pairs = [map(bin, p.split("-")) | |
910 | for p in req.form['pairs'][0].split(" ")] |
|
916 | for p in req.form['pairs'][0].split(" ")] | |
911 | resp = cStringIO.StringIO() |
|
917 | resp = cStringIO.StringIO() | |
912 | for b in self.repo.between(pairs): |
|
918 | for b in self.repo.between(pairs): | |
913 | resp.write(" ".join(map(hex, b)) + "\n") |
|
919 | resp.write(" ".join(map(hex, b)) + "\n") | |
914 | resp = resp.getvalue() |
|
920 | resp = resp.getvalue() | |
915 | req.httphdr("application/mercurial-0.1", length=len(resp)) |
|
921 | req.httphdr("application/mercurial-0.1", length=len(resp)) | |
916 | req.write(resp) |
|
922 | req.write(resp) | |
917 |
|
923 | |||
918 | def do_changegroup(self, req): |
|
924 | def do_changegroup(self, req): | |
919 | req.httphdr("application/mercurial-0.1") |
|
925 | req.httphdr("application/mercurial-0.1") | |
920 | nodes = [] |
|
926 | nodes = [] | |
921 | if not self.allowpull: |
|
927 | if not self.allowpull: | |
922 | return |
|
928 | return | |
923 |
|
929 | |||
924 | if req.form.has_key('roots'): |
|
930 | if req.form.has_key('roots'): | |
925 | nodes = map(bin, req.form['roots'][0].split(" ")) |
|
931 | nodes = map(bin, req.form['roots'][0].split(" ")) | |
926 |
|
932 | |||
927 | z = zlib.compressobj() |
|
933 | z = zlib.compressobj() | |
928 | f = self.repo.changegroup(nodes, 'serve') |
|
934 | f = self.repo.changegroup(nodes, 'serve') | |
929 | while 1: |
|
935 | while 1: | |
930 | chunk = f.read(4096) |
|
936 | chunk = f.read(4096) | |
931 | if not chunk: |
|
937 | if not chunk: | |
932 | break |
|
938 | break | |
933 | req.write(z.compress(chunk)) |
|
939 | req.write(z.compress(chunk)) | |
934 |
|
940 | |||
935 | req.write(z.flush()) |
|
941 | req.write(z.flush()) | |
936 |
|
942 | |||
937 | def do_changegroupsubset(self, req): |
|
943 | def do_changegroupsubset(self, req): | |
938 | req.httphdr("application/mercurial-0.1") |
|
944 | req.httphdr("application/mercurial-0.1") | |
939 | bases = [] |
|
945 | bases = [] | |
940 | heads = [] |
|
946 | heads = [] | |
941 | if not self.allowpull: |
|
947 | if not self.allowpull: | |
942 | return |
|
948 | return | |
943 |
|
949 | |||
944 | if req.form.has_key('bases'): |
|
950 | if req.form.has_key('bases'): | |
945 | bases = [bin(x) for x in req.form['bases'][0].split(' ')] |
|
951 | bases = [bin(x) for x in req.form['bases'][0].split(' ')] | |
946 | if req.form.has_key('heads'): |
|
952 | if req.form.has_key('heads'): | |
947 | heads = [bin(x) for x in req.form['heads'][0].split(' ')] |
|
953 | heads = [bin(x) for x in req.form['heads'][0].split(' ')] | |
948 |
|
954 | |||
949 | z = zlib.compressobj() |
|
955 | z = zlib.compressobj() | |
950 | f = self.repo.changegroupsubset(bases, heads, 'serve') |
|
956 | f = self.repo.changegroupsubset(bases, heads, 'serve') | |
951 | while 1: |
|
957 | while 1: | |
952 | chunk = f.read(4096) |
|
958 | chunk = f.read(4096) | |
953 | if not chunk: |
|
959 | if not chunk: | |
954 | break |
|
960 | break | |
955 | req.write(z.compress(chunk)) |
|
961 | req.write(z.compress(chunk)) | |
956 |
|
962 | |||
957 | req.write(z.flush()) |
|
963 | req.write(z.flush()) | |
958 |
|
964 | |||
959 | def do_archive(self, req): |
|
965 | def do_archive(self, req): | |
960 | changeset = self.repo.lookup(req.form['node'][0]) |
|
966 | changeset = self.repo.lookup(req.form['node'][0]) | |
961 | type_ = req.form['type'][0] |
|
967 | type_ = req.form['type'][0] | |
962 | allowed = self.repo.ui.configlist("web", "allow_archive") |
|
968 | allowed = self.repo.ui.configlist("web", "allow_archive") | |
963 | if (type_ in self.archives and (type_ in allowed or |
|
969 | if (type_ in self.archives and (type_ in allowed or | |
964 | self.repo.ui.configbool("web", "allow" + type_, False))): |
|
970 | self.repo.ui.configbool("web", "allow" + type_, False))): | |
965 | self.archive(req, changeset, type_) |
|
971 | self.archive(req, changeset, type_) | |
966 | return |
|
972 | return | |
967 |
|
973 | |||
968 | req.write(self.t("error")) |
|
974 | req.write(self.t("error")) | |
969 |
|
975 | |||
970 | def do_static(self, req): |
|
976 | def do_static(self, req): | |
971 | fname = req.form['file'][0] |
|
977 | fname = req.form['file'][0] | |
972 | static = self.repo.ui.config("web", "static", |
|
978 | static = self.repo.ui.config("web", "static", | |
973 | os.path.join(self.templatepath, |
|
979 | os.path.join(self.templatepath, | |
974 | "static")) |
|
980 | "static")) | |
975 | req.write(staticfile(static, fname, req) |
|
981 | req.write(staticfile(static, fname, req) | |
976 | or self.t("error", error="%r not found" % fname)) |
|
982 | or self.t("error", error="%r not found" % fname)) | |
977 |
|
983 | |||
978 | def do_capabilities(self, req): |
|
984 | def do_capabilities(self, req): | |
979 | caps = ['unbundle', 'lookup', 'changegroupsubset'] |
|
985 | caps = ['unbundle', 'lookup', 'changegroupsubset'] | |
980 | if self.repo.ui.configbool('server', 'uncompressed'): |
|
986 | if self.repo.ui.configbool('server', 'uncompressed'): | |
981 | caps.append('stream=%d' % self.repo.revlogversion) |
|
987 | caps.append('stream=%d' % self.repo.revlogversion) | |
982 | resp = ' '.join(caps) |
|
988 | resp = ' '.join(caps) | |
983 | req.httphdr("application/mercurial-0.1", length=len(resp)) |
|
989 | req.httphdr("application/mercurial-0.1", length=len(resp)) | |
984 | req.write(resp) |
|
990 | req.write(resp) | |
985 |
|
991 | |||
986 | def check_perm(self, req, op, default): |
|
992 | def check_perm(self, req, op, default): | |
987 | '''check permission for operation based on user auth. |
|
993 | '''check permission for operation based on user auth. | |
988 | return true if op allowed, else false. |
|
994 | return true if op allowed, else false. | |
989 | default is policy to use if no config given.''' |
|
995 | default is policy to use if no config given.''' | |
990 |
|
996 | |||
991 | user = req.env.get('REMOTE_USER') |
|
997 | user = req.env.get('REMOTE_USER') | |
992 |
|
998 | |||
993 | deny = self.repo.ui.configlist('web', 'deny_' + op) |
|
999 | deny = self.repo.ui.configlist('web', 'deny_' + op) | |
994 | if deny and (not user or deny == ['*'] or user in deny): |
|
1000 | if deny and (not user or deny == ['*'] or user in deny): | |
995 | return False |
|
1001 | return False | |
996 |
|
1002 | |||
997 | allow = self.repo.ui.configlist('web', 'allow_' + op) |
|
1003 | allow = self.repo.ui.configlist('web', 'allow_' + op) | |
998 | return (allow and (allow == ['*'] or user in allow)) or default |
|
1004 | return (allow and (allow == ['*'] or user in allow)) or default | |
999 |
|
1005 | |||
1000 | def do_unbundle(self, req): |
|
1006 | def do_unbundle(self, req): | |
1001 | def bail(response, headers={}): |
|
1007 | def bail(response, headers={}): | |
1002 | length = int(req.env['CONTENT_LENGTH']) |
|
1008 | length = int(req.env['CONTENT_LENGTH']) | |
1003 | for s in util.filechunkiter(req, limit=length): |
|
1009 | for s in util.filechunkiter(req, limit=length): | |
1004 | # drain incoming bundle, else client will not see |
|
1010 | # drain incoming bundle, else client will not see | |
1005 | # response when run outside cgi script |
|
1011 | # response when run outside cgi script | |
1006 | pass |
|
1012 | pass | |
1007 | req.httphdr("application/mercurial-0.1", headers=headers) |
|
1013 | req.httphdr("application/mercurial-0.1", headers=headers) | |
1008 | req.write('0\n') |
|
1014 | req.write('0\n') | |
1009 | req.write(response) |
|
1015 | req.write(response) | |
1010 |
|
1016 | |||
1011 | # require ssl by default, auth info cannot be sniffed and |
|
1017 | # require ssl by default, auth info cannot be sniffed and | |
1012 | # replayed |
|
1018 | # replayed | |
1013 | ssl_req = self.repo.ui.configbool('web', 'push_ssl', True) |
|
1019 | ssl_req = self.repo.ui.configbool('web', 'push_ssl', True) | |
1014 | if ssl_req: |
|
1020 | if ssl_req: | |
1015 | if not req.env.get('HTTPS'): |
|
1021 | if not req.env.get('HTTPS'): | |
1016 | bail(_('ssl required\n')) |
|
1022 | bail(_('ssl required\n')) | |
1017 | return |
|
1023 | return | |
1018 | proto = 'https' |
|
1024 | proto = 'https' | |
1019 | else: |
|
1025 | else: | |
1020 | proto = 'http' |
|
1026 | proto = 'http' | |
1021 |
|
1027 | |||
1022 | # do not allow push unless explicitly allowed |
|
1028 | # do not allow push unless explicitly allowed | |
1023 | if not self.check_perm(req, 'push', False): |
|
1029 | if not self.check_perm(req, 'push', False): | |
1024 | bail(_('push not authorized\n'), |
|
1030 | bail(_('push not authorized\n'), | |
1025 | headers={'status': '401 Unauthorized'}) |
|
1031 | headers={'status': '401 Unauthorized'}) | |
1026 | return |
|
1032 | return | |
1027 |
|
1033 | |||
1028 | req.httphdr("application/mercurial-0.1") |
|
1034 | req.httphdr("application/mercurial-0.1") | |
1029 |
|
1035 | |||
1030 | their_heads = req.form['heads'][0].split(' ') |
|
1036 | their_heads = req.form['heads'][0].split(' ') | |
1031 |
|
1037 | |||
1032 | def check_heads(): |
|
1038 | def check_heads(): | |
1033 | heads = map(hex, self.repo.heads()) |
|
1039 | heads = map(hex, self.repo.heads()) | |
1034 | return their_heads == [hex('force')] or their_heads == heads |
|
1040 | return their_heads == [hex('force')] or their_heads == heads | |
1035 |
|
1041 | |||
1036 | # fail early if possible |
|
1042 | # fail early if possible | |
1037 | if not check_heads(): |
|
1043 | if not check_heads(): | |
1038 | bail(_('unsynced changes\n')) |
|
1044 | bail(_('unsynced changes\n')) | |
1039 | return |
|
1045 | return | |
1040 |
|
1046 | |||
1041 | # do not lock repo until all changegroup data is |
|
1047 | # do not lock repo until all changegroup data is | |
1042 | # streamed. save to temporary file. |
|
1048 | # streamed. save to temporary file. | |
1043 |
|
1049 | |||
1044 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') |
|
1050 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') | |
1045 | fp = os.fdopen(fd, 'wb+') |
|
1051 | fp = os.fdopen(fd, 'wb+') | |
1046 | try: |
|
1052 | try: | |
1047 | length = int(req.env['CONTENT_LENGTH']) |
|
1053 | length = int(req.env['CONTENT_LENGTH']) | |
1048 | for s in util.filechunkiter(req, limit=length): |
|
1054 | for s in util.filechunkiter(req, limit=length): | |
1049 | fp.write(s) |
|
1055 | fp.write(s) | |
1050 |
|
1056 | |||
1051 | lock = self.repo.lock() |
|
1057 | lock = self.repo.lock() | |
1052 | try: |
|
1058 | try: | |
1053 | if not check_heads(): |
|
1059 | if not check_heads(): | |
1054 | req.write('0\n') |
|
1060 | req.write('0\n') | |
1055 | req.write(_('unsynced changes\n')) |
|
1061 | req.write(_('unsynced changes\n')) | |
1056 | return |
|
1062 | return | |
1057 |
|
1063 | |||
1058 | fp.seek(0) |
|
1064 | fp.seek(0) | |
1059 |
|
1065 | |||
1060 | # send addchangegroup output to client |
|
1066 | # send addchangegroup output to client | |
1061 |
|
1067 | |||
1062 | old_stdout = sys.stdout |
|
1068 | old_stdout = sys.stdout | |
1063 | sys.stdout = cStringIO.StringIO() |
|
1069 | sys.stdout = cStringIO.StringIO() | |
1064 |
|
1070 | |||
1065 | try: |
|
1071 | try: | |
1066 | url = 'remote:%s:%s' % (proto, |
|
1072 | url = 'remote:%s:%s' % (proto, | |
1067 | req.env.get('REMOTE_HOST', '')) |
|
1073 | req.env.get('REMOTE_HOST', '')) | |
1068 | ret = self.repo.addchangegroup(fp, 'serve', url) |
|
1074 | ret = self.repo.addchangegroup(fp, 'serve', url) | |
1069 | finally: |
|
1075 | finally: | |
1070 | val = sys.stdout.getvalue() |
|
1076 | val = sys.stdout.getvalue() | |
1071 | sys.stdout = old_stdout |
|
1077 | sys.stdout = old_stdout | |
1072 | req.write('%d\n' % ret) |
|
1078 | req.write('%d\n' % ret) | |
1073 | req.write(val) |
|
1079 | req.write(val) | |
1074 | finally: |
|
1080 | finally: | |
1075 | lock.release() |
|
1081 | lock.release() | |
1076 | finally: |
|
1082 | finally: | |
1077 | fp.close() |
|
1083 | fp.close() | |
1078 | os.unlink(tempname) |
|
1084 | os.unlink(tempname) | |
1079 |
|
1085 | |||
1080 | def do_stream_out(self, req): |
|
1086 | def do_stream_out(self, req): | |
1081 | req.httphdr("application/mercurial-0.1") |
|
1087 | req.httphdr("application/mercurial-0.1") | |
1082 | streamclone.stream_out(self.repo, req) |
|
1088 | streamclone.stream_out(self.repo, req) |
@@ -1,379 +1,378 | |||||
1 | # httprepo.py - HTTP repository proxy classes for mercurial |
|
1 | # httprepo.py - HTTP repository proxy classes for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
4 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | from node import * |
|
9 | from node import * | |
10 | from remoterepo import * |
|
10 | from remoterepo import * | |
11 | from i18n import gettext as _ |
|
11 | from i18n import gettext as _ | |
12 | from demandload import * |
|
12 | from demandload import * | |
13 | demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib") |
|
13 | demandload(globals(), "hg os urllib urllib2 urlparse zlib util httplib") | |
14 | demandload(globals(), "errno keepalive tempfile socket") |
|
14 | demandload(globals(), "errno keepalive tempfile socket") | |
15 |
|
15 | |||
16 | class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm): |
|
16 | class passwordmgr(urllib2.HTTPPasswordMgrWithDefaultRealm): | |
17 | def __init__(self, ui): |
|
17 | def __init__(self, ui): | |
18 | urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self) |
|
18 | urllib2.HTTPPasswordMgrWithDefaultRealm.__init__(self) | |
19 | self.ui = ui |
|
19 | self.ui = ui | |
20 |
|
20 | |||
21 | def find_user_password(self, realm, authuri): |
|
21 | def find_user_password(self, realm, authuri): | |
22 | authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password( |
|
22 | authinfo = urllib2.HTTPPasswordMgrWithDefaultRealm.find_user_password( | |
23 | self, realm, authuri) |
|
23 | self, realm, authuri) | |
24 | user, passwd = authinfo |
|
24 | user, passwd = authinfo | |
25 | if user and passwd: |
|
25 | if user and passwd: | |
26 | return (user, passwd) |
|
26 | return (user, passwd) | |
27 |
|
27 | |||
28 | if not self.ui.interactive: |
|
28 | if not self.ui.interactive: | |
29 | raise util.Abort(_('http authorization required')) |
|
29 | raise util.Abort(_('http authorization required')) | |
30 |
|
30 | |||
31 | self.ui.write(_("http authorization required\n")) |
|
31 | self.ui.write(_("http authorization required\n")) | |
32 | self.ui.status(_("realm: %s\n") % realm) |
|
32 | self.ui.status(_("realm: %s\n") % realm) | |
33 | if user: |
|
33 | if user: | |
34 | self.ui.status(_("user: %s\n") % user) |
|
34 | self.ui.status(_("user: %s\n") % user) | |
35 | else: |
|
35 | else: | |
36 | user = self.ui.prompt(_("user:"), default=None) |
|
36 | user = self.ui.prompt(_("user:"), default=None) | |
37 |
|
37 | |||
38 | if not passwd: |
|
38 | if not passwd: | |
39 | passwd = self.ui.getpass() |
|
39 | passwd = self.ui.getpass() | |
40 |
|
40 | |||
41 | self.add_password(realm, authuri, user, passwd) |
|
41 | self.add_password(realm, authuri, user, passwd) | |
42 | return (user, passwd) |
|
42 | return (user, passwd) | |
43 |
|
43 | |||
44 | def netlocsplit(netloc): |
|
44 | def netlocsplit(netloc): | |
45 | '''split [user[:passwd]@]host[:port] into 4-tuple.''' |
|
45 | '''split [user[:passwd]@]host[:port] into 4-tuple.''' | |
46 |
|
46 | |||
47 | a = netloc.find('@') |
|
47 | a = netloc.find('@') | |
48 | if a == -1: |
|
48 | if a == -1: | |
49 | user, passwd = None, None |
|
49 | user, passwd = None, None | |
50 | else: |
|
50 | else: | |
51 | userpass, netloc = netloc[:a], netloc[a+1:] |
|
51 | userpass, netloc = netloc[:a], netloc[a+1:] | |
52 | c = userpass.find(':') |
|
52 | c = userpass.find(':') | |
53 | if c == -1: |
|
53 | if c == -1: | |
54 | user, passwd = urllib.unquote(userpass), None |
|
54 | user, passwd = urllib.unquote(userpass), None | |
55 | else: |
|
55 | else: | |
56 | user = urllib.unquote(userpass[:c]) |
|
56 | user = urllib.unquote(userpass[:c]) | |
57 | passwd = urllib.unquote(userpass[c+1:]) |
|
57 | passwd = urllib.unquote(userpass[c+1:]) | |
58 | c = netloc.find(':') |
|
58 | c = netloc.find(':') | |
59 | if c == -1: |
|
59 | if c == -1: | |
60 | host, port = netloc, None |
|
60 | host, port = netloc, None | |
61 | else: |
|
61 | else: | |
62 | host, port = netloc[:c], netloc[c+1:] |
|
62 | host, port = netloc[:c], netloc[c+1:] | |
63 | return host, port, user, passwd |
|
63 | return host, port, user, passwd | |
64 |
|
64 | |||
65 | def netlocunsplit(host, port, user=None, passwd=None): |
|
65 | def netlocunsplit(host, port, user=None, passwd=None): | |
66 | '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' |
|
66 | '''turn host, port, user, passwd into [user[:passwd]@]host[:port].''' | |
67 | if port: |
|
67 | if port: | |
68 | hostport = host + ':' + port |
|
68 | hostport = host + ':' + port | |
69 | else: |
|
69 | else: | |
70 | hostport = host |
|
70 | hostport = host | |
71 | if user: |
|
71 | if user: | |
72 | if passwd: |
|
72 | if passwd: | |
73 | userpass = urllib.quote(user) + ':' + urllib.quote(passwd) |
|
73 | userpass = urllib.quote(user) + ':' + urllib.quote(passwd) | |
74 | else: |
|
74 | else: | |
75 | userpass = urllib.quote(user) |
|
75 | userpass = urllib.quote(user) | |
76 | return userpass + '@' + hostport |
|
76 | return userpass + '@' + hostport | |
77 | return hostport |
|
77 | return hostport | |
78 |
|
78 | |||
79 | class httpconnection(keepalive.HTTPConnection): |
|
79 | class httpconnection(keepalive.HTTPConnection): | |
80 | # must be able to send big bundle as stream. |
|
80 | # must be able to send big bundle as stream. | |
81 |
|
81 | |||
82 | def send(self, data): |
|
82 | def send(self, data): | |
83 | if isinstance(data, str): |
|
83 | if isinstance(data, str): | |
84 | keepalive.HTTPConnection.send(self, data) |
|
84 | keepalive.HTTPConnection.send(self, data) | |
85 | else: |
|
85 | else: | |
86 | # if auth required, some data sent twice, so rewind here |
|
86 | # if auth required, some data sent twice, so rewind here | |
87 | data.seek(0) |
|
87 | data.seek(0) | |
88 | for chunk in util.filechunkiter(data): |
|
88 | for chunk in util.filechunkiter(data): | |
89 | keepalive.HTTPConnection.send(self, chunk) |
|
89 | keepalive.HTTPConnection.send(self, chunk) | |
90 |
|
90 | |||
91 | class basehttphandler(keepalive.HTTPHandler): |
|
91 | class basehttphandler(keepalive.HTTPHandler): | |
92 | def http_open(self, req): |
|
92 | def http_open(self, req): | |
93 | return self.do_open(httpconnection, req) |
|
93 | return self.do_open(httpconnection, req) | |
94 |
|
94 | |||
95 | has_https = hasattr(urllib2, 'HTTPSHandler') |
|
95 | has_https = hasattr(urllib2, 'HTTPSHandler') | |
96 | if has_https: |
|
96 | if has_https: | |
97 | class httpsconnection(httplib.HTTPSConnection): |
|
97 | class httpsconnection(httplib.HTTPSConnection): | |
98 | response_class = keepalive.HTTPResponse |
|
98 | response_class = keepalive.HTTPResponse | |
99 | # must be able to send big bundle as stream. |
|
99 | # must be able to send big bundle as stream. | |
100 |
|
100 | |||
101 | def send(self, data): |
|
101 | def send(self, data): | |
102 | if isinstance(data, str): |
|
102 | if isinstance(data, str): | |
103 | httplib.HTTPSConnection.send(self, data) |
|
103 | httplib.HTTPSConnection.send(self, data) | |
104 | else: |
|
104 | else: | |
105 | # if auth required, some data sent twice, so rewind here |
|
105 | # if auth required, some data sent twice, so rewind here | |
106 | data.seek(0) |
|
106 | data.seek(0) | |
107 | for chunk in util.filechunkiter(data): |
|
107 | for chunk in util.filechunkiter(data): | |
108 | httplib.HTTPSConnection.send(self, chunk) |
|
108 | httplib.HTTPSConnection.send(self, chunk) | |
109 |
|
109 | |||
110 | class httphandler(basehttphandler, urllib2.HTTPSHandler): |
|
110 | class httphandler(basehttphandler, urllib2.HTTPSHandler): | |
111 | def https_open(self, req): |
|
111 | def https_open(self, req): | |
112 | return self.do_open(httpsconnection, req) |
|
112 | return self.do_open(httpsconnection, req) | |
113 | else: |
|
113 | else: | |
114 | class httphandler(basehttphandler): |
|
114 | class httphandler(basehttphandler): | |
115 | pass |
|
115 | pass | |
116 |
|
116 | |||
117 | class httprepository(remoterepository): |
|
117 | class httprepository(remoterepository): | |
118 | def __init__(self, ui, path): |
|
118 | def __init__(self, ui, path): | |
119 | self.path = path |
|
119 | self.path = path | |
120 | self.caps = None |
|
120 | self.caps = None | |
121 | scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) |
|
121 | scheme, netloc, urlpath, query, frag = urlparse.urlsplit(path) | |
122 | if query or frag: |
|
122 | if query or frag: | |
123 | raise util.Abort(_('unsupported URL component: "%s"') % |
|
123 | raise util.Abort(_('unsupported URL component: "%s"') % | |
124 | (query or frag)) |
|
124 | (query or frag)) | |
125 | if not urlpath: urlpath = '/' |
|
125 | if not urlpath: urlpath = '/' | |
126 | host, port, user, passwd = netlocsplit(netloc) |
|
126 | host, port, user, passwd = netlocsplit(netloc) | |
127 |
|
127 | |||
128 | # urllib cannot handle URLs with embedded user or passwd |
|
128 | # urllib cannot handle URLs with embedded user or passwd | |
129 | self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port), |
|
129 | self._url = urlparse.urlunsplit((scheme, netlocunsplit(host, port), | |
130 | urlpath, '', '')) |
|
130 | urlpath, '', '')) | |
131 | self.ui = ui |
|
131 | self.ui = ui | |
132 |
|
132 | |||
133 | proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy') |
|
133 | proxyurl = ui.config("http_proxy", "host") or os.getenv('http_proxy') | |
134 | # XXX proxyauthinfo = None |
|
134 | # XXX proxyauthinfo = None | |
135 | handler = httphandler() |
|
135 | handler = httphandler() | |
136 |
|
136 | |||
137 | if proxyurl: |
|
137 | if proxyurl: | |
138 | # proxy can be proper url or host[:port] |
|
138 | # proxy can be proper url or host[:port] | |
139 | if not (proxyurl.startswith('http:') or |
|
139 | if not (proxyurl.startswith('http:') or | |
140 | proxyurl.startswith('https:')): |
|
140 | proxyurl.startswith('https:')): | |
141 | proxyurl = 'http://' + proxyurl + '/' |
|
141 | proxyurl = 'http://' + proxyurl + '/' | |
142 | snpqf = urlparse.urlsplit(proxyurl) |
|
142 | snpqf = urlparse.urlsplit(proxyurl) | |
143 | proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf |
|
143 | proxyscheme, proxynetloc, proxypath, proxyquery, proxyfrag = snpqf | |
144 | hpup = netlocsplit(proxynetloc) |
|
144 | hpup = netlocsplit(proxynetloc) | |
145 |
|
145 | |||
146 | proxyhost, proxyport, proxyuser, proxypasswd = hpup |
|
146 | proxyhost, proxyport, proxyuser, proxypasswd = hpup | |
147 | if not proxyuser: |
|
147 | if not proxyuser: | |
148 | proxyuser = ui.config("http_proxy", "user") |
|
148 | proxyuser = ui.config("http_proxy", "user") | |
149 | proxypasswd = ui.config("http_proxy", "passwd") |
|
149 | proxypasswd = ui.config("http_proxy", "passwd") | |
150 |
|
150 | |||
151 | # see if we should use a proxy for this url |
|
151 | # see if we should use a proxy for this url | |
152 | no_list = [ "localhost", "127.0.0.1" ] |
|
152 | no_list = [ "localhost", "127.0.0.1" ] | |
153 | no_list.extend([p.lower() for |
|
153 | no_list.extend([p.lower() for | |
154 | p in ui.configlist("http_proxy", "no")]) |
|
154 | p in ui.configlist("http_proxy", "no")]) | |
155 | no_list.extend([p.strip().lower() for |
|
155 | no_list.extend([p.strip().lower() for | |
156 | p in os.getenv("no_proxy", '').split(',') |
|
156 | p in os.getenv("no_proxy", '').split(',') | |
157 | if p.strip()]) |
|
157 | if p.strip()]) | |
158 | # "http_proxy.always" config is for running tests on localhost |
|
158 | # "http_proxy.always" config is for running tests on localhost | |
159 | if (not ui.configbool("http_proxy", "always") and |
|
159 | if (not ui.configbool("http_proxy", "always") and | |
160 | host.lower() in no_list): |
|
160 | host.lower() in no_list): | |
161 | ui.debug(_('disabling proxy for %s\n') % host) |
|
161 | ui.debug(_('disabling proxy for %s\n') % host) | |
162 | else: |
|
162 | else: | |
163 | proxyurl = urlparse.urlunsplit(( |
|
163 | proxyurl = urlparse.urlunsplit(( | |
164 | proxyscheme, netlocunsplit(proxyhost, proxyport, |
|
164 | proxyscheme, netlocunsplit(proxyhost, proxyport, | |
165 | proxyuser, proxypasswd or ''), |
|
165 | proxyuser, proxypasswd or ''), | |
166 | proxypath, proxyquery, proxyfrag)) |
|
166 | proxypath, proxyquery, proxyfrag)) | |
167 | handler = urllib2.ProxyHandler({scheme: proxyurl}) |
|
167 | handler = urllib2.ProxyHandler({scheme: proxyurl}) | |
168 | ui.debug(_('proxying through http://%s:%s\n') % |
|
168 | ui.debug(_('proxying through http://%s:%s\n') % | |
169 | (proxyhost, proxyport)) |
|
169 | (proxyhost, proxyport)) | |
170 |
|
170 | |||
171 | # urllib2 takes proxy values from the environment and those |
|
171 | # urllib2 takes proxy values from the environment and those | |
172 | # will take precedence if found, so drop them |
|
172 | # will take precedence if found, so drop them | |
173 | for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]: |
|
173 | for env in ["HTTP_PROXY", "http_proxy", "no_proxy"]: | |
174 | try: |
|
174 | try: | |
175 | if os.environ.has_key(env): |
|
175 | if os.environ.has_key(env): | |
176 | del os.environ[env] |
|
176 | del os.environ[env] | |
177 | except OSError: |
|
177 | except OSError: | |
178 | pass |
|
178 | pass | |
179 |
|
179 | |||
180 | passmgr = passwordmgr(ui) |
|
180 | passmgr = passwordmgr(ui) | |
181 | if user: |
|
181 | if user: | |
182 | ui.debug(_('http auth: user %s, password %s\n') % |
|
182 | ui.debug(_('http auth: user %s, password %s\n') % | |
183 | (user, passwd and '*' * len(passwd) or 'not set')) |
|
183 | (user, passwd and '*' * len(passwd) or 'not set')) | |
184 | passmgr.add_password(None, host, user, passwd or '') |
|
184 | passmgr.add_password(None, host, user, passwd or '') | |
185 |
|
185 | |||
186 | opener = urllib2.build_opener( |
|
186 | opener = urllib2.build_opener( | |
187 | handler, |
|
187 | handler, | |
188 | urllib2.HTTPBasicAuthHandler(passmgr), |
|
188 | urllib2.HTTPBasicAuthHandler(passmgr), | |
189 | urllib2.HTTPDigestAuthHandler(passmgr)) |
|
189 | urllib2.HTTPDigestAuthHandler(passmgr)) | |
190 |
|
190 | |||
191 | # 1.0 here is the _protocol_ version |
|
191 | # 1.0 here is the _protocol_ version | |
192 | opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] |
|
192 | opener.addheaders = [('User-agent', 'mercurial/proto-1.0')] | |
193 | urllib2.install_opener(opener) |
|
193 | urllib2.install_opener(opener) | |
194 |
|
194 | |||
195 | def url(self): |
|
195 | def url(self): | |
196 | return self.path |
|
196 | return self.path | |
197 |
|
197 | |||
198 | # look up capabilities only when needed |
|
198 | # look up capabilities only when needed | |
199 |
|
199 | |||
200 | def get_caps(self): |
|
200 | def get_caps(self): | |
201 | if self.caps is None: |
|
201 | if self.caps is None: | |
202 | try: |
|
202 | try: | |
203 | self.caps = self.do_read('capabilities').split() |
|
203 | self.caps = self.do_read('capabilities').split() | |
204 | except hg.RepoError: |
|
204 | except hg.RepoError: | |
205 | self.caps = () |
|
205 | self.caps = () | |
206 | self.ui.debug(_('capabilities: %s\n') % |
|
206 | self.ui.debug(_('capabilities: %s\n') % | |
207 | (' '.join(self.caps or ['none']))) |
|
207 | (' '.join(self.caps or ['none']))) | |
208 | return self.caps |
|
208 | return self.caps | |
209 |
|
209 | |||
210 | capabilities = property(get_caps) |
|
210 | capabilities = property(get_caps) | |
211 |
|
211 | |||
212 | def lock(self): |
|
212 | def lock(self): | |
213 | raise util.Abort(_('operation not supported over http')) |
|
213 | raise util.Abort(_('operation not supported over http')) | |
214 |
|
214 | |||
215 | def do_cmd(self, cmd, **args): |
|
215 | def do_cmd(self, cmd, **args): | |
216 | data = args.pop('data', None) |
|
216 | data = args.pop('data', None) | |
217 | headers = args.pop('headers', {}) |
|
217 | headers = args.pop('headers', {}) | |
218 | self.ui.debug(_("sending %s command\n") % cmd) |
|
218 | self.ui.debug(_("sending %s command\n") % cmd) | |
219 | q = {"cmd": cmd} |
|
219 | q = {"cmd": cmd} | |
220 | q.update(args) |
|
220 | q.update(args) | |
221 | qs = urllib.urlencode(q) |
|
221 | qs = urllib.urlencode(q) | |
222 | cu = "%s?%s" % (self._url, qs) |
|
222 | cu = "%s?%s" % (self._url, qs) | |
223 | try: |
|
223 | try: | |
224 | resp = urllib2.urlopen(urllib2.Request(cu, data, headers)) |
|
224 | resp = urllib2.urlopen(urllib2.Request(cu, data, headers)) | |
225 | except urllib2.HTTPError, inst: |
|
225 | except urllib2.HTTPError, inst: | |
226 | if inst.code == 401: |
|
226 | if inst.code == 401: | |
227 | raise util.Abort(_('authorization failed')) |
|
227 | raise util.Abort(_('authorization failed')) | |
228 | raise |
|
228 | raise | |
229 | except httplib.HTTPException, inst: |
|
229 | except httplib.HTTPException, inst: | |
230 | self.ui.debug(_('http error while sending %s command\n') % cmd) |
|
230 | self.ui.debug(_('http error while sending %s command\n') % cmd) | |
231 | self.ui.print_exc() |
|
231 | self.ui.print_exc() | |
232 | raise IOError(None, inst) |
|
232 | raise IOError(None, inst) | |
233 | except IndexError: |
|
233 | except IndexError: | |
234 | # this only happens with Python 2.3, later versions raise URLError |
|
234 | # this only happens with Python 2.3, later versions raise URLError | |
235 | raise util.Abort(_('http error, possibly caused by proxy setting')) |
|
235 | raise util.Abort(_('http error, possibly caused by proxy setting')) | |
236 | try: |
|
236 | try: | |
237 | proto = resp.getheader('content-type') |
|
237 | proto = resp.getheader('content-type') | |
238 | except AttributeError: |
|
238 | except AttributeError: | |
239 | proto = resp.headers['content-type'] |
|
239 | proto = resp.headers['content-type'] | |
240 |
|
240 | |||
241 | # accept old "text/plain" and "application/hg-changegroup" for now |
|
241 | # accept old "text/plain" and "application/hg-changegroup" for now | |
242 | if not proto.startswith('application/mercurial') and \ |
|
242 | if not proto.startswith('application/mercurial') and \ | |
243 | not proto.startswith('text/plain') and \ |
|
243 | not proto.startswith('text/plain') and \ | |
244 | not proto.startswith('application/hg-changegroup'): |
|
244 | not proto.startswith('application/hg-changegroup'): | |
245 | raise hg.RepoError(_("'%s' does not appear to be an hg repository") % |
|
245 | raise hg.RepoError(_("'%s' does not appear to be an hg repository") % | |
246 | self._url) |
|
246 | self._url) | |
247 |
|
247 | |||
248 | if proto.startswith('application/mercurial'): |
|
248 | if proto.startswith('application/mercurial'): | |
249 | version = proto[22:] |
|
249 | version = proto[22:] | |
250 | if float(version) > 0.1: |
|
250 | if float(version) > 0.1: | |
251 | raise hg.RepoError(_("'%s' uses newer protocol %s") % |
|
251 | raise hg.RepoError(_("'%s' uses newer protocol %s") % | |
252 | (self._url, version)) |
|
252 | (self._url, version)) | |
253 |
|
253 | |||
254 | return resp |
|
254 | return resp | |
255 |
|
255 | |||
256 | def do_read(self, cmd, **args): |
|
256 | def do_read(self, cmd, **args): | |
257 | fp = self.do_cmd(cmd, **args) |
|
257 | fp = self.do_cmd(cmd, **args) | |
258 | try: |
|
258 | try: | |
259 | return fp.read() |
|
259 | return fp.read() | |
260 | finally: |
|
260 | finally: | |
261 | # if using keepalive, allow connection to be reused |
|
261 | # if using keepalive, allow connection to be reused | |
262 | fp.close() |
|
262 | fp.close() | |
263 |
|
263 | |||
264 | def lookup(self, key): |
|
264 | def lookup(self, key): | |
265 | try: |
|
|||
266 | d = self.do_cmd("lookup", key = key).read() |
|
|
265 | d = self.do_cmd("lookup", key = key).read() | |
267 | return bin(d[:-1]) |
|
266 | success, data = d[:-1].split(' ', 1) | |
268 | except: |
|
267 | if int(success): | |
269 | self.ui.warn('Not able to look up revision named "%s"\n' % (key,)) |
|
268 | return bin(data) | |
270 | raise |
|
269 | raise hg.RepoError(data) | |
271 |
|
270 | |||
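On the client side, httprepository.lookup above now splits that same "<success> <payload>" line and raises hg.RepoError with the server's message instead of printing a warning. A rough, self-contained parsing sketch under the same assumptions; ValueError stands in for hg.RepoError, which is not imported here.

def parse_lookup_response(resp):
    # resp is the body of a "?cmd=lookup" request, e.g. "1 <40-char hex>\n"
    # on success or "0 unknown revision 'foo'\n" on failure
    success, data = resp[:-1].split(' ', 1)
    if int(success):
        return data              # hex node; bin() it to get raw bytes
    raise ValueError(data)       # stand-in for hg.RepoError(data)

# parse_lookup_response("1 " + "f" * 40 + "\n") == "f" * 40

Note that the new client parses the new format unconditionally, which is why the server-side do_lookup change appears in the same changeset.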
272 | def heads(self): |
|
271 | def heads(self): | |
273 | d = self.do_read("heads") |
|
272 | d = self.do_read("heads") | |
274 | try: |
|
273 | try: | |
275 | return map(bin, d[:-1].split(" ")) |
|
274 | return map(bin, d[:-1].split(" ")) | |
276 | except: |
|
275 | except: | |
277 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") |
|
276 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") | |
278 | raise |
|
277 | raise | |
279 |
|
278 | |||
280 | def branches(self, nodes): |
|
279 | def branches(self, nodes): | |
281 | n = " ".join(map(hex, nodes)) |
|
280 | n = " ".join(map(hex, nodes)) | |
282 | d = self.do_read("branches", nodes=n) |
|
281 | d = self.do_read("branches", nodes=n) | |
283 | try: |
|
282 | try: | |
284 | br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ] |
|
283 | br = [ tuple(map(bin, b.split(" "))) for b in d.splitlines() ] | |
285 | return br |
|
284 | return br | |
286 | except: |
|
285 | except: | |
287 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") |
|
286 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") | |
288 | raise |
|
287 | raise | |
289 |
|
288 | |||
290 | def between(self, pairs): |
|
289 | def between(self, pairs): | |
291 | n = "\n".join(["-".join(map(hex, p)) for p in pairs]) |
|
290 | n = "\n".join(["-".join(map(hex, p)) for p in pairs]) | |
292 | d = self.do_read("between", pairs=n) |
|
291 | d = self.do_read("between", pairs=n) | |
293 | try: |
|
292 | try: | |
294 | p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ] |
|
293 | p = [ l and map(bin, l.split(" ")) or [] for l in d.splitlines() ] | |
295 | return p |
|
294 | return p | |
296 | except: |
|
295 | except: | |
297 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") |
|
296 | self.ui.warn(_("unexpected response:\n") + d[:400] + "\n...\n") | |
298 | raise |
|
297 | raise | |
299 |
|
298 | |||
300 | def changegroup(self, nodes, kind): |
|
299 | def changegroup(self, nodes, kind): | |
301 | n = " ".join(map(hex, nodes)) |
|
300 | n = " ".join(map(hex, nodes)) | |
302 | f = self.do_cmd("changegroup", roots=n) |
|
301 | f = self.do_cmd("changegroup", roots=n) | |
303 |
|
302 | |||
304 | def zgenerator(f): |
|
303 | def zgenerator(f): | |
305 | zd = zlib.decompressobj() |
|
304 | zd = zlib.decompressobj() | |
306 | try: |
|
305 | try: | |
307 | for chnk in f: |
|
306 | for chnk in f: | |
308 | yield zd.decompress(chnk) |
|
307 | yield zd.decompress(chnk) | |
309 | except httplib.HTTPException, inst: |
|
308 | except httplib.HTTPException, inst: | |
310 | raise IOError(None, _('connection ended unexpectedly')) |
|
309 | raise IOError(None, _('connection ended unexpectedly')) | |
311 | yield zd.flush() |
|
310 | yield zd.flush() | |
312 |
|
311 | |||
313 | return util.chunkbuffer(zgenerator(util.filechunkiter(f))) |
|
312 | return util.chunkbuffer(zgenerator(util.filechunkiter(f))) | |
314 |
|
313 | |||
315 | def changegroupsubset(self, bases, heads, source): |
|
314 | def changegroupsubset(self, bases, heads, source): | |
316 | baselst = " ".join([hex(n) for n in bases]) |
|
315 | baselst = " ".join([hex(n) for n in bases]) | |
317 | headlst = " ".join([hex(n) for n in heads]) |
|
316 | headlst = " ".join([hex(n) for n in heads]) | |
318 | f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst) |
|
317 | f = self.do_cmd("changegroupsubset", bases=baselst, heads=headlst) | |
319 |
|
318 | |||
320 | def zgenerator(f): |
|
319 | def zgenerator(f): | |
321 | zd = zlib.decompressobj() |
|
320 | zd = zlib.decompressobj() | |
322 | try: |
|
321 | try: | |
323 | for chnk in f: |
|
322 | for chnk in f: | |
324 | yield zd.decompress(chnk) |
|
323 | yield zd.decompress(chnk) | |
325 | except httplib.HTTPException: |
|
324 | except httplib.HTTPException: | |
326 | raise IOError(None, _('connection ended unexpectedly')) |
|
325 | raise IOError(None, _('connection ended unexpectedly')) | |
327 | yield zd.flush() |
|
326 | yield zd.flush() | |
328 |
|
327 | |||
329 | return util.chunkbuffer(zgenerator(util.filechunkiter(f))) |
|
328 | return util.chunkbuffer(zgenerator(util.filechunkiter(f))) | |
330 |
|
329 | |||
331 | def unbundle(self, cg, heads, source): |
|
330 | def unbundle(self, cg, heads, source): | |
332 | # have to stream bundle to a temp file because we do not have |
|
331 | # have to stream bundle to a temp file because we do not have | |
333 | # http 1.1 chunked transfer. |
|
332 | # http 1.1 chunked transfer. | |
334 |
|
333 | |||
335 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') |
|
334 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') | |
336 | fp = os.fdopen(fd, 'wb+') |
|
335 | fp = os.fdopen(fd, 'wb+') | |
337 | try: |
|
336 | try: | |
338 | for chunk in util.filechunkiter(cg): |
|
337 | for chunk in util.filechunkiter(cg): | |
339 | fp.write(chunk) |
|
338 | fp.write(chunk) | |
340 | length = fp.tell() |
|
339 | length = fp.tell() | |
341 | try: |
|
340 | try: | |
342 | rfp = self.do_cmd( |
|
341 | rfp = self.do_cmd( | |
343 | 'unbundle', data=fp, |
|
342 | 'unbundle', data=fp, | |
344 | headers={'content-length': length, |
|
343 | headers={'content-length': length, | |
345 | 'content-type': 'application/octet-stream'}, |
|
344 | 'content-type': 'application/octet-stream'}, | |
346 | heads=' '.join(map(hex, heads))) |
|
345 | heads=' '.join(map(hex, heads))) | |
347 | try: |
|
346 | try: | |
348 | ret = int(rfp.readline()) |
|
347 | ret = int(rfp.readline()) | |
349 | self.ui.write(rfp.read()) |
|
348 | self.ui.write(rfp.read()) | |
350 | return ret |
|
349 | return ret | |
351 | finally: |
|
350 | finally: | |
352 | rfp.close() |
|
351 | rfp.close() | |
353 | except socket.error, err: |
|
352 | except socket.error, err: | |
354 | if err[0] in (errno.ECONNRESET, errno.EPIPE): |
|
353 | if err[0] in (errno.ECONNRESET, errno.EPIPE): | |
355 | raise util.Abort(_('push failed: %s') % err[1]) |
|
354 | raise util.Abort(_('push failed: %s') % err[1]) | |
356 | raise util.Abort(err[1]) |
|
355 | raise util.Abort(err[1]) | |
357 | finally: |
|
356 | finally: | |
358 | fp.close() |
|
357 | fp.close() | |
359 | os.unlink(tempname) |
|
358 | os.unlink(tempname) | |
360 |
|
359 | |||
361 | def stream_out(self): |
|
360 | def stream_out(self): | |
362 | return self.do_cmd('stream_out') |
|
361 | return self.do_cmd('stream_out') | |
363 |
|
362 | |||
364 | class httpsrepository(httprepository): |
|
363 | class httpsrepository(httprepository): | |
365 | def __init__(self, ui, path): |
|
364 | def __init__(self, ui, path): | |
366 | if not has_https: |
|
365 | if not has_https: | |
367 | raise util.Abort(_('Python support for SSL and HTTPS ' |
|
366 | raise util.Abort(_('Python support for SSL and HTTPS ' | |
368 | 'is not installed')) |
|
367 | 'is not installed')) | |
369 | httprepository.__init__(self, ui, path) |
|
368 | httprepository.__init__(self, ui, path) | |
370 |
|
369 | |||
371 | def instance(ui, path, create): |
|
370 | def instance(ui, path, create): | |
372 | if create: |
|
371 | if create: | |
373 | raise util.Abort(_('cannot create new http repository')) |
|
372 | raise util.Abort(_('cannot create new http repository')) | |
374 | if path.startswith('hg:'): |
|
373 | if path.startswith('hg:'): | |
375 | ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n")) |
|
374 | ui.warn(_("hg:// syntax is deprecated, please use http:// instead\n")) | |
376 | path = 'http:' + path[3:] |
|
375 | path = 'http:' + path[3:] | |
377 | if path.startswith('https:'): |
|
376 | if path.startswith('https:'): | |
378 | return httpsrepository(ui, path) |
|
377 | return httpsrepository(ui, path) | |
379 | return httprepository(ui, path) |
|
378 | return httprepository(ui, path) |