@@ -1,126 +1,129 b'' | |||
|
1 | 1 | """ |
|
2 | 2 | changegroup.py - Mercurial changegroup manipulation functions |
|
3 | 3 | |
|
4 | 4 | Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | |
|
6 | 6 | This software may be used and distributed according to the terms |
|
7 | 7 | of the GNU General Public License, incorporated herein by reference. |
|
8 | 8 | """ |
|
9 | 9 | |
|
10 | 10 | from i18n import _ |
|
11 | 11 | import struct, os, bz2, zlib, util, tempfile |
|
12 | 12 | |
|
13 | 13 | def getchunk(source): |
|
14 | 14 | """get a chunk from a changegroup""" |
|
15 | 15 | d = source.read(4) |
|
16 | 16 | if not d: |
|
17 | 17 | return "" |
|
18 | 18 | l = struct.unpack(">l", d)[0] |
|
19 | 19 | if l <= 4: |
|
20 | 20 | return "" |
|
21 | 21 | d = source.read(l - 4) |
|
22 | 22 | if len(d) < l - 4: |
|
23 | 23 | raise util.Abort(_("premature EOF reading chunk" |
|
24 | 24 | " (got %d bytes, expected %d)") |
|
25 | 25 | % (len(d), l - 4)) |
|
26 | 26 | return d |
|
27 | 27 | |
|
28 | 28 | def chunkiter(source): |
|
29 | 29 | """iterate through the chunks in source""" |
|
30 | 30 | while 1: |
|
31 | 31 | c = getchunk(source) |
|
32 | 32 | if not c: |
|
33 | 33 | break |
|
34 | 34 | yield c |
|
35 | 35 | |
|
36 | 36 | def chunkheader(length): |
|
37 | 37 | """build a changegroup chunk header""" |
|
38 | 38 | return struct.pack(">l", length + 4) |
|
39 | 39 | |
|
40 | 40 | def closechunk(): |
|
41 | 41 | return struct.pack(">l", 0) |
|
42 | 42 | |
|
43 | 43 | class nocompress(object): |
|
44 | 44 | def compress(self, x): |
|
45 | 45 | return x |
|
46 | 46 | def flush(self): |
|
47 | 47 | return "" |
|
48 | 48 | |
|
49 | 49 | bundletypes = { |
|
50 | 50 | "": ("", nocompress), |
|
51 | 51 | "HG10UN": ("HG10UN", nocompress), |
|
52 | 52 | "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()), |
|
53 | 53 | "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), |
|
54 | 54 | } |
|
55 | 55 | |
|
56 | # hgweb uses this list to communicate its preferred type | |
|
57 | bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN'] | |
|
58 | ||
|
56 | 59 | def writebundle(cg, filename, bundletype): |
|
57 | 60 | """Write a bundle file and return its filename. |
|
58 | 61 | |
|
59 | 62 | Existing files will not be overwritten. |
|
60 | 63 | If no filename is specified, a temporary file is created. |
|
61 | 64 | bz2 compression can be turned off. |
|
62 | 65 | The bundle file will be deleted in case of errors. |
|
63 | 66 | """ |
|
64 | 67 | |
|
65 | 68 | fh = None |
|
66 | 69 | cleanup = None |
|
67 | 70 | try: |
|
68 | 71 | if filename: |
|
69 | 72 | fh = open(filename, "wb") |
|
70 | 73 | else: |
|
71 | 74 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
72 | 75 | fh = os.fdopen(fd, "wb") |
|
73 | 76 | cleanup = filename |
|
74 | 77 | |
|
75 | 78 | header, compressor = bundletypes[bundletype] |
|
76 | 79 | fh.write(header) |
|
77 | 80 | z = compressor() |
|
78 | 81 | |
|
79 | 82 | # parse the changegroup data, otherwise we will block |
|
80 | 83 | # in case of sshrepo because we don't know the end of the stream |
|
81 | 84 | |
|
82 | 85 | # an empty chunkiter is the end of the changegroup |
|
83 | 86 | # a changegroup has at least 2 chunkiters (changelog and manifest). |
|
84 | 87 | # after that, an empty chunkiter is the end of the changegroup |
|
85 | 88 | empty = False |
|
86 | 89 | count = 0 |
|
87 | 90 | while not empty or count <= 2: |
|
88 | 91 | empty = True |
|
89 | 92 | count += 1 |
|
90 | 93 | for chunk in chunkiter(cg): |
|
91 | 94 | empty = False |
|
92 | 95 | fh.write(z.compress(chunkheader(len(chunk)))) |
|
93 | 96 | pos = 0 |
|
94 | 97 | while pos < len(chunk): |
|
95 | 98 | next = pos + 2**20 |
|
96 | 99 | fh.write(z.compress(chunk[pos:next])) |
|
97 | 100 | pos = next |
|
98 | 101 | fh.write(z.compress(closechunk())) |
|
99 | 102 | fh.write(z.flush()) |
|
100 | 103 | cleanup = None |
|
101 | 104 | return filename |
|
102 | 105 | finally: |
|
103 | 106 | if fh is not None: |
|
104 | 107 | fh.close() |
|
105 | 108 | if cleanup is not None: |
|
106 | 109 | os.unlink(cleanup) |
|
107 | 110 | |
|
108 | 111 | def readbundle(fh, fname): |
|
109 | 112 | header = fh.read(6) |
|
110 | 113 | if not header.startswith("HG"): |
|
111 | 114 | raise util.Abort(_("%s: not a Mercurial bundle file") % fname) |
|
112 | 115 | elif not header.startswith("HG10"): |
|
113 | 116 | raise util.Abort(_("%s: unknown bundle version") % fname) |
|
114 | 117 | |
|
115 | 118 | if header == "HG10BZ": |
|
116 | 119 | def generator(f): |
|
117 | 120 | zd = bz2.BZ2Decompressor() |
|
118 | 121 | zd.decompress("BZ") |
|
119 | 122 | for chunk in util.filechunkiter(f, 4096): |
|
120 | 123 | yield zd.decompress(chunk) |
|
121 | 124 | return util.chunkbuffer(generator(fh)) |
|
122 | 125 | elif header == "HG10UN": |
|
123 | 126 | return fh |
|
124 | 127 | |
|
125 | 128 | raise util.Abort(_("%s: unknown bundle compression type") |
|
126 | 129 | % fname) |
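
Note on the new bundlepriority list introduced above: it is only an ordered preference, so callers still have to intersect it with the types the other side advertises. A minimal sketch of that selection step under stated assumptions (the helper name pickbundletype and the sample client list are illustrative, not part of the patch):

bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN']  # as defined in changegroup.py above

def pickbundletype(clienttypes):
    # Return the highest-priority bundle type the client also accepts,
    # or None when there is no overlap.
    for bt in bundlepriority:
        if bt in clienttypes:
            return bt
    return None

# A client that only handles bzip2 and uncompressed bundles gets HG10BZ:
assert pickbundletype(['HG10BZ', 'HG10UN']) == 'HG10BZ'
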
@@ -1,929 +1,942 b'' | |||
|
1 | 1 | # hgweb/hgweb_mod.py - Web interface for a repository. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms |
|
7 | 7 | # of the GNU General Public License, incorporated herein by reference. |
|
8 | 8 | |
|
9 | 9 | import os, mimetypes, re |
|
10 | 10 | from mercurial.node import * |
|
11 | 11 | from mercurial import mdiff, ui, hg, util, archival, patch, hook |
|
12 | from mercurial import revlog, templater, templatefilters | |
|
12 | from mercurial import revlog, templater, templatefilters, changegroup | |
|
13 | 13 | from common import get_mtime, style_map, paritygen, countgen, get_contact |
|
14 | 14 | from common import ErrorResponse |
|
15 | 15 | from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
16 | 16 | from request import wsgirequest |
|
17 | 17 | import webcommands, protocol |
|
18 | 18 | |
|
19 | 19 | shortcuts = { |
|
20 | 20 | 'cl': [('cmd', ['changelog']), ('rev', None)], |
|
21 | 21 | 'sl': [('cmd', ['shortlog']), ('rev', None)], |
|
22 | 22 | 'cs': [('cmd', ['changeset']), ('node', None)], |
|
23 | 23 | 'f': [('cmd', ['file']), ('filenode', None)], |
|
24 | 24 | 'fl': [('cmd', ['filelog']), ('filenode', None)], |
|
25 | 25 | 'fd': [('cmd', ['filediff']), ('node', None)], |
|
26 | 26 | 'fa': [('cmd', ['annotate']), ('filenode', None)], |
|
27 | 27 | 'mf': [('cmd', ['manifest']), ('manifest', None)], |
|
28 | 28 | 'ca': [('cmd', ['archive']), ('node', None)], |
|
29 | 29 | 'tags': [('cmd', ['tags'])], |
|
30 | 30 | 'tip': [('cmd', ['changeset']), ('node', ['tip'])], |
|
31 | 31 | 'static': [('cmd', ['static']), ('file', None)] |
|
32 | 32 | } |
|
33 | 33 | |
|
34 | 34 | def _up(p): |
|
35 | 35 | if p[0] != "/": |
|
36 | 36 | p = "/" + p |
|
37 | 37 | if p[-1] == "/": |
|
38 | 38 | p = p[:-1] |
|
39 | 39 | up = os.path.dirname(p) |
|
40 | 40 | if up == "/": |
|
41 | 41 | return "/" |
|
42 | 42 | return up + "/" |
|
43 | 43 | |
|
44 | 44 | def revnavgen(pos, pagelen, limit, nodefunc): |
|
45 | 45 | def seq(factor, limit=None): |
|
46 | 46 | if limit: |
|
47 | 47 | yield limit |
|
48 | 48 | if limit >= 20 and limit <= 40: |
|
49 | 49 | yield 50 |
|
50 | 50 | else: |
|
51 | 51 | yield 1 * factor |
|
52 | 52 | yield 3 * factor |
|
53 | 53 | for f in seq(factor * 10): |
|
54 | 54 | yield f |
|
55 | 55 | |
|
56 | 56 | def nav(**map): |
|
57 | 57 | l = [] |
|
58 | 58 | last = 0 |
|
59 | 59 | for f in seq(1, pagelen): |
|
60 | 60 | if f < pagelen or f <= last: |
|
61 | 61 | continue |
|
62 | 62 | if f > limit: |
|
63 | 63 | break |
|
64 | 64 | last = f |
|
65 | 65 | if pos + f < limit: |
|
66 | 66 | l.append(("+%d" % f, hex(nodefunc(pos + f).node()))) |
|
67 | 67 | if pos - f >= 0: |
|
68 | 68 | l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) |
|
69 | 69 | |
|
70 | 70 | try: |
|
71 | 71 | yield {"label": "(0)", "node": hex(nodefunc('0').node())} |
|
72 | 72 | |
|
73 | 73 | for label, node in l: |
|
74 | 74 | yield {"label": label, "node": node} |
|
75 | 75 | |
|
76 | 76 | yield {"label": "tip", "node": "tip"} |
|
77 | 77 | except hg.RepoError: |
|
78 | 78 | pass |
|
79 | 79 | |
|
80 | 80 | return nav |
|
81 | 81 | |
|
82 | 82 | class hgweb(object): |
|
83 | 83 | def __init__(self, repo, name=None): |
|
84 | 84 | if isinstance(repo, str): |
|
85 | 85 | parentui = ui.ui(report_untrusted=False, interactive=False) |
|
86 | 86 | self.repo = hg.repository(parentui, repo) |
|
87 | 87 | else: |
|
88 | 88 | self.repo = repo |
|
89 | 89 | |
|
90 | 90 | hook.redirect(True) |
|
91 | 91 | self.mtime = -1 |
|
92 | 92 | self.reponame = name |
|
93 | 93 | self.archives = 'zip', 'gz', 'bz2' |
|
94 | 94 | self.stripecount = 1 |
|
95 | self._capabilities = None | |
|
95 | 96 | # a repo owner may set web.templates in .hg/hgrc to get any file |
|
96 | 97 | # readable by the user running the CGI script |
|
97 | 98 | self.templatepath = self.config("web", "templates", |
|
98 | 99 | templater.templatepath(), |
|
99 | 100 | untrusted=False) |
|
100 | 101 | |
|
101 | 102 | # The CGI scripts are often run by a user different from the repo owner. |
|
102 | 103 | # Trust the settings from the .hg/hgrc files by default. |
|
103 | 104 | def config(self, section, name, default=None, untrusted=True): |
|
104 | 105 | return self.repo.ui.config(section, name, default, |
|
105 | 106 | untrusted=untrusted) |
|
106 | 107 | |
|
107 | 108 | def configbool(self, section, name, default=False, untrusted=True): |
|
108 | 109 | return self.repo.ui.configbool(section, name, default, |
|
109 | 110 | untrusted=untrusted) |
|
110 | 111 | |
|
111 | 112 | def configlist(self, section, name, default=None, untrusted=True): |
|
112 | 113 | return self.repo.ui.configlist(section, name, default, |
|
113 | 114 | untrusted=untrusted) |
|
114 | 115 | |
|
115 | 116 | def refresh(self): |
|
116 | 117 | mtime = get_mtime(self.repo.root) |
|
117 | 118 | if mtime != self.mtime: |
|
118 | 119 | self.mtime = mtime |
|
119 | 120 | self.repo = hg.repository(self.repo.ui, self.repo.root) |
|
120 | 121 | self.maxchanges = int(self.config("web", "maxchanges", 10)) |
|
121 | 122 | self.stripecount = int(self.config("web", "stripes", 1)) |
|
122 | 123 | self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) |
|
123 | 124 | self.maxfiles = int(self.config("web", "maxfiles", 10)) |
|
124 | 125 | self.allowpull = self.configbool("web", "allowpull", True) |
|
125 | 126 | self.encoding = self.config("web", "encoding", util._encoding) |
|
127 | self._capabilities = None | |
|
128 | ||
|
129 | def capabilities(self): | |
|
130 | if self._capabilities is not None: | |
|
131 | return self._capabilities | |
|
132 | caps = ['lookup', 'changegroupsubset'] | |
|
133 | if self.configbool('server', 'uncompressed'): | |
|
134 | caps.append('stream=%d' % self.repo.changelog.version) | |
|
135 | if changegroup.bundlepriority: | |
|
136 | caps.append('unbundle=%s' % ','.join(changegroup.bundlepriority)) | |
|
137 | self._capabilities = caps | |
|
138 | return caps | |
|
126 | 139 | |
|
127 | 140 | def run(self): |
|
128 | 141 | if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): |
|
129 | 142 | raise RuntimeError("This function is only intended to be called while running as a CGI script.") |
|
130 | 143 | import mercurial.hgweb.wsgicgi as wsgicgi |
|
131 | 144 | wsgicgi.launch(self) |
|
132 | 145 | |
|
133 | 146 | def __call__(self, env, respond): |
|
134 | 147 | req = wsgirequest(env, respond) |
|
135 | 148 | self.run_wsgi(req) |
|
136 | 149 | return req |
|
137 | 150 | |
|
138 | 151 | def run_wsgi(self, req): |
|
139 | 152 | |
|
140 | 153 | self.refresh() |
|
141 | 154 | |
|
142 | 155 | # expand form shortcuts |
|
143 | 156 | |
|
144 | 157 | for k in shortcuts.iterkeys(): |
|
145 | 158 | if k in req.form: |
|
146 | 159 | for name, value in shortcuts[k]: |
|
147 | 160 | if value is None: |
|
148 | 161 | value = req.form[k] |
|
149 | 162 | req.form[name] = value |
|
150 | 163 | del req.form[k] |
|
151 | 164 | |
|
152 | 165 | # work with CGI variables to create coherent structure |
|
153 | 166 | # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME |
|
154 | 167 | |
|
155 | 168 | req.url = req.env['SCRIPT_NAME'] |
|
156 | 169 | if not req.url.endswith('/'): |
|
157 | 170 | req.url += '/' |
|
158 | 171 | if 'REPO_NAME' in req.env: |
|
159 | 172 | req.url += req.env['REPO_NAME'] + '/' |
|
160 | 173 | |
|
161 | 174 | if req.env.get('PATH_INFO'): |
|
162 | 175 | parts = req.env.get('PATH_INFO').strip('/').split('/') |
|
163 | 176 | repo_parts = req.env.get('REPO_NAME', '').split('/') |
|
164 | 177 | if parts[:len(repo_parts)] == repo_parts: |
|
165 | 178 | parts = parts[len(repo_parts):] |
|
166 | 179 | query = '/'.join(parts) |
|
167 | 180 | else: |
|
168 | 181 | query = req.env['QUERY_STRING'].split('&', 1)[0] |
|
169 | 182 | query = query.split(';', 1)[0] |
|
170 | 183 | |
|
171 | 184 | # translate user-visible url structure to internal structure |
|
172 | 185 | |
|
173 | 186 | args = query.split('/', 2) |
|
174 | 187 | if 'cmd' not in req.form and args and args[0]: |
|
175 | 188 | |
|
176 | 189 | cmd = args.pop(0) |
|
177 | 190 | style = cmd.rfind('-') |
|
178 | 191 | if style != -1: |
|
179 | 192 | req.form['style'] = [cmd[:style]] |
|
180 | 193 | cmd = cmd[style+1:] |
|
181 | 194 | |
|
182 | 195 | # avoid accepting e.g. style parameter as command |
|
183 | 196 | if hasattr(webcommands, cmd) or hasattr(protocol, cmd): |
|
184 | 197 | req.form['cmd'] = [cmd] |
|
185 | 198 | |
|
186 | 199 | if args and args[0]: |
|
187 | 200 | node = args.pop(0) |
|
188 | 201 | req.form['node'] = [node] |
|
189 | 202 | if args: |
|
190 | 203 | req.form['file'] = args |
|
191 | 204 | |
|
192 | 205 | if cmd == 'static': |
|
193 | 206 | req.form['file'] = req.form['node'] |
|
194 | 207 | elif cmd == 'archive': |
|
195 | 208 | fn = req.form['node'][0] |
|
196 | 209 | for type_, spec in self.archive_specs.iteritems(): |
|
197 | 210 | ext = spec[2] |
|
198 | 211 | if fn.endswith(ext): |
|
199 | 212 | req.form['node'] = [fn[:-len(ext)]] |
|
200 | 213 | req.form['type'] = [type_] |
|
201 | 214 | |
|
202 | 215 | # process this if it's a protocol request |
|
203 | 216 | |
|
204 | 217 | cmd = req.form.get('cmd', [''])[0] |
|
205 | 218 | if cmd in protocol.__all__: |
|
206 | 219 | method = getattr(protocol, cmd) |
|
207 | 220 | method(self, req) |
|
208 | 221 | return |
|
209 | 222 | |
|
210 | 223 | # process the web interface request |
|
211 | 224 | |
|
212 | 225 | try: |
|
213 | 226 | |
|
214 | 227 | tmpl = self.templater(req) |
|
215 | 228 | ctype = tmpl('mimetype', encoding=self.encoding) |
|
216 | 229 | ctype = templater.stringify(ctype) |
|
217 | 230 | |
|
218 | 231 | if cmd == '': |
|
219 | 232 | req.form['cmd'] = [tmpl.cache['default']] |
|
220 | 233 | cmd = req.form['cmd'][0] |
|
221 | 234 | |
|
222 | 235 | if cmd not in webcommands.__all__: |
|
223 | 236 | msg = 'No such method: %s' % cmd |
|
224 | 237 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) |
|
225 | 238 | elif cmd == 'file' and 'raw' in req.form.get('style', []): |
|
226 | 239 | self.ctype = ctype |
|
227 | 240 | content = webcommands.rawfile(self, req, tmpl) |
|
228 | 241 | else: |
|
229 | 242 | content = getattr(webcommands, cmd)(self, req, tmpl) |
|
230 | 243 | req.respond(HTTP_OK, ctype) |
|
231 | 244 | |
|
232 | 245 | req.write(content) |
|
233 | 246 | del tmpl |
|
234 | 247 | |
|
235 | 248 | except revlog.LookupError, err: |
|
236 | 249 | req.respond(HTTP_NOT_FOUND, ctype) |
|
237 | 250 | req.write(tmpl('error', error='revision not found: %s' % err.name)) |
|
238 | 251 | except (hg.RepoError, revlog.RevlogError), inst: |
|
239 | 252 | req.respond(HTTP_SERVER_ERROR, ctype) |
|
240 | 253 | req.write(tmpl('error', error=str(inst))) |
|
241 | 254 | except ErrorResponse, inst: |
|
242 | 255 | req.respond(inst.code, ctype) |
|
243 | 256 | req.write(tmpl('error', error=inst.message)) |
|
244 | 257 | |
|
245 | 258 | def templater(self, req): |
|
246 | 259 | |
|
247 | 260 | # determine scheme, port and server name |
|
248 | 261 | # this is needed to create absolute urls |
|
249 | 262 | |
|
250 | 263 | proto = req.env.get('wsgi.url_scheme') |
|
251 | 264 | if proto == 'https': |
|
252 | 265 | proto = 'https' |
|
253 | 266 | default_port = "443" |
|
254 | 267 | else: |
|
255 | 268 | proto = 'http' |
|
256 | 269 | default_port = "80" |
|
257 | 270 | |
|
258 | 271 | port = req.env["SERVER_PORT"] |
|
259 | 272 | port = port != default_port and (":" + port) or "" |
|
260 | 273 | urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) |
|
261 | 274 | staticurl = self.config("web", "staticurl") or req.url + 'static/' |
|
262 | 275 | if not staticurl.endswith('/'): |
|
263 | 276 | staticurl += '/' |
|
264 | 277 | |
|
265 | 278 | # some functions for the templater |
|
266 | 279 | |
|
267 | 280 | def header(**map): |
|
268 | 281 | yield tmpl('header', encoding=self.encoding, **map) |
|
269 | 282 | |
|
270 | 283 | def footer(**map): |
|
271 | 284 | yield tmpl("footer", **map) |
|
272 | 285 | |
|
273 | 286 | def motd(**map): |
|
274 | 287 | yield self.config("web", "motd", "") |
|
275 | 288 | |
|
276 | 289 | def sessionvars(**map): |
|
277 | 290 | fields = [] |
|
278 | 291 | if 'style' in req.form: |
|
279 | 292 | style = req.form['style'][0] |
|
280 | 293 | if style != self.config('web', 'style', ''): |
|
281 | 294 | fields.append(('style', style)) |
|
282 | 295 | |
|
283 | 296 | separator = req.url[-1] == '?' and ';' or '?' |
|
284 | 297 | for name, value in fields: |
|
285 | 298 | yield dict(name=name, value=value, separator=separator) |
|
286 | 299 | separator = ';' |
|
287 | 300 | |
|
288 | 301 | # figure out which style to use |
|
289 | 302 | |
|
290 | 303 | style = self.config("web", "style", "") |
|
291 | 304 | if 'style' in req.form: |
|
292 | 305 | style = req.form['style'][0] |
|
293 | 306 | mapfile = style_map(self.templatepath, style) |
|
294 | 307 | |
|
295 | 308 | if not self.reponame: |
|
296 | 309 | self.reponame = (self.config("web", "name") |
|
297 | 310 | or req.env.get('REPO_NAME') |
|
298 | 311 | or req.url.strip('/') or self.repo.root) |
|
299 | 312 | |
|
300 | 313 | # create the templater |
|
301 | 314 | |
|
302 | 315 | tmpl = templater.templater(mapfile, templatefilters.filters, |
|
303 | 316 | defaults={"url": req.url, |
|
304 | 317 | "staticurl": staticurl, |
|
305 | 318 | "urlbase": urlbase, |
|
306 | 319 | "repo": self.reponame, |
|
307 | 320 | "header": header, |
|
308 | 321 | "footer": footer, |
|
309 | 322 | "motd": motd, |
|
310 | 323 | "sessionvars": sessionvars |
|
311 | 324 | }) |
|
312 | 325 | return tmpl |
|
313 | 326 | |
|
314 | 327 | def archivelist(self, nodeid): |
|
315 | 328 | allowed = self.configlist("web", "allow_archive") |
|
316 | 329 | for i, spec in self.archive_specs.iteritems(): |
|
317 | 330 | if i in allowed or self.configbool("web", "allow" + i): |
|
318 | 331 | yield {"type" : i, "extension" : spec[2], "node" : nodeid} |
|
319 | 332 | |
|
320 | 333 | def listfilediffs(self, tmpl, files, changeset): |
|
321 | 334 | for f in files[:self.maxfiles]: |
|
322 | 335 | yield tmpl("filedifflink", node=hex(changeset), file=f) |
|
323 | 336 | if len(files) > self.maxfiles: |
|
324 | 337 | yield tmpl("fileellipses") |
|
325 | 338 | |
|
326 | 339 | def siblings(self, siblings=[], hiderev=None, **args): |
|
327 | 340 | siblings = [s for s in siblings if s.node() != nullid] |
|
328 | 341 | if len(siblings) == 1 and siblings[0].rev() == hiderev: |
|
329 | 342 | return |
|
330 | 343 | for s in siblings: |
|
331 | 344 | d = {'node': hex(s.node()), 'rev': s.rev()} |
|
332 | 345 | if hasattr(s, 'path'): |
|
333 | 346 | d['file'] = s.path() |
|
334 | 347 | d.update(args) |
|
335 | 348 | yield d |
|
336 | 349 | |
|
337 | 350 | def renamelink(self, fl, node): |
|
338 | 351 | r = fl.renamed(node) |
|
339 | 352 | if r: |
|
340 | 353 | return [dict(file=r[0], node=hex(r[1]))] |
|
341 | 354 | return [] |
|
342 | 355 | |
|
343 | 356 | def nodetagsdict(self, node): |
|
344 | 357 | return [{"name": i} for i in self.repo.nodetags(node)] |
|
345 | 358 | |
|
346 | 359 | def nodebranchdict(self, ctx): |
|
347 | 360 | branches = [] |
|
348 | 361 | branch = ctx.branch() |
|
349 | 362 | # If this is an empty repo, ctx.node() == nullid, |
|
350 | 363 | # ctx.branch() == 'default', but branchtags() is |
|
351 | 364 | # an empty dict. Using dict.get avoids a traceback. |
|
352 | 365 | if self.repo.branchtags().get(branch) == ctx.node(): |
|
353 | 366 | branches.append({"name": branch}) |
|
354 | 367 | return branches |
|
355 | 368 | |
|
356 | 369 | def showtag(self, tmpl, t1, node=nullid, **args): |
|
357 | 370 | for t in self.repo.nodetags(node): |
|
358 | 371 | yield tmpl(t1, tag=t, **args) |
|
359 | 372 | |
|
360 | 373 | def diff(self, tmpl, node1, node2, files): |
|
361 | 374 | def filterfiles(filters, files): |
|
362 | 375 | l = [x for x in files if x in filters] |
|
363 | 376 | |
|
364 | 377 | for t in filters: |
|
365 | 378 | if t and t[-1] != os.sep: |
|
366 | 379 | t += os.sep |
|
367 | 380 | l += [x for x in files if x.startswith(t)] |
|
368 | 381 | return l |
|
369 | 382 | |
|
370 | 383 | parity = paritygen(self.stripecount) |
|
371 | 384 | def diffblock(diff, f, fn): |
|
372 | 385 | yield tmpl("diffblock", |
|
373 | 386 | lines=prettyprintlines(diff), |
|
374 | 387 | parity=parity.next(), |
|
375 | 388 | file=f, |
|
376 | 389 | filenode=hex(fn or nullid)) |
|
377 | 390 | |
|
378 | 391 | blockcount = countgen() |
|
379 | 392 | def prettyprintlines(diff): |
|
380 | 393 | blockno = blockcount.next() |
|
381 | 394 | for lineno, l in enumerate(diff.splitlines(1)): |
|
382 | 395 | if blockno == 0: |
|
383 | 396 | lineno = lineno + 1 |
|
384 | 397 | else: |
|
385 | 398 | lineno = "%d.%d" % (blockno, lineno + 1) |
|
386 | 399 | if l.startswith('+'): |
|
387 | 400 | ltype = "difflineplus" |
|
388 | 401 | elif l.startswith('-'): |
|
389 | 402 | ltype = "difflineminus" |
|
390 | 403 | elif l.startswith('@'): |
|
391 | 404 | ltype = "difflineat" |
|
392 | 405 | else: |
|
393 | 406 | ltype = "diffline" |
|
394 | 407 | yield tmpl(ltype, |
|
395 | 408 | line=l, |
|
396 | 409 | lineid="l%s" % lineno, |
|
397 | 410 | linenumber="% 8s" % lineno) |
|
398 | 411 | |
|
399 | 412 | r = self.repo |
|
400 | 413 | c1 = r.changectx(node1) |
|
401 | 414 | c2 = r.changectx(node2) |
|
402 | 415 | date1 = util.datestr(c1.date()) |
|
403 | 416 | date2 = util.datestr(c2.date()) |
|
404 | 417 | |
|
405 | 418 | modified, added, removed, deleted, unknown = r.status(node1, node2)[:5] |
|
406 | 419 | if files: |
|
407 | 420 | modified, added, removed = map(lambda x: filterfiles(files, x), |
|
408 | 421 | (modified, added, removed)) |
|
409 | 422 | |
|
410 | 423 | diffopts = patch.diffopts(self.repo.ui, untrusted=True) |
|
411 | 424 | for f in modified: |
|
412 | 425 | to = c1.filectx(f).data() |
|
413 | 426 | tn = c2.filectx(f).data() |
|
414 | 427 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
415 | 428 | opts=diffopts), f, tn) |
|
416 | 429 | for f in added: |
|
417 | 430 | to = None |
|
418 | 431 | tn = c2.filectx(f).data() |
|
419 | 432 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
420 | 433 | opts=diffopts), f, tn) |
|
421 | 434 | for f in removed: |
|
422 | 435 | to = c1.filectx(f).data() |
|
423 | 436 | tn = None |
|
424 | 437 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
425 | 438 | opts=diffopts), f, tn) |
|
426 | 439 | |
|
427 | 440 | def changelog(self, tmpl, ctx, shortlog=False): |
|
428 | 441 | def changelist(limit=0,**map): |
|
429 | 442 | cl = self.repo.changelog |
|
430 | 443 | l = [] # build a list in forward order for efficiency |
|
431 | 444 | for i in xrange(start, end): |
|
432 | 445 | ctx = self.repo.changectx(i) |
|
433 | 446 | n = ctx.node() |
|
434 | 447 | |
|
435 | 448 | l.insert(0, {"parity": parity.next(), |
|
436 | 449 | "author": ctx.user(), |
|
437 | 450 | "parent": self.siblings(ctx.parents(), i - 1), |
|
438 | 451 | "child": self.siblings(ctx.children(), i + 1), |
|
439 | 452 | "changelogtag": self.showtag("changelogtag",n), |
|
440 | 453 | "desc": ctx.description(), |
|
441 | 454 | "date": ctx.date(), |
|
442 | 455 | "files": self.listfilediffs(tmpl, ctx.files(), n), |
|
443 | 456 | "rev": i, |
|
444 | 457 | "node": hex(n), |
|
445 | 458 | "tags": self.nodetagsdict(n), |
|
446 | 459 | "branches": self.nodebranchdict(ctx)}) |
|
447 | 460 | |
|
448 | 461 | if limit > 0: |
|
449 | 462 | l = l[:limit] |
|
450 | 463 | |
|
451 | 464 | for e in l: |
|
452 | 465 | yield e |
|
453 | 466 | |
|
454 | 467 | maxchanges = shortlog and self.maxshortchanges or self.maxchanges |
|
455 | 468 | cl = self.repo.changelog |
|
456 | 469 | count = cl.count() |
|
457 | 470 | pos = ctx.rev() |
|
458 | 471 | start = max(0, pos - maxchanges + 1) |
|
459 | 472 | end = min(count, start + maxchanges) |
|
460 | 473 | pos = end - 1 |
|
461 | 474 | parity = paritygen(self.stripecount, offset=start-end) |
|
462 | 475 | |
|
463 | 476 | changenav = revnavgen(pos, maxchanges, count, self.repo.changectx) |
|
464 | 477 | |
|
465 | 478 | return tmpl(shortlog and 'shortlog' or 'changelog', |
|
466 | 479 | changenav=changenav, |
|
467 | 480 | node=hex(cl.tip()), |
|
468 | 481 | rev=pos, changesets=count, |
|
469 | 482 | entries=lambda **x: changelist(limit=0,**x), |
|
470 | 483 | latestentry=lambda **x: changelist(limit=1,**x), |
|
471 | 484 | archives=self.archivelist("tip")) |
|
472 | 485 | |
|
473 | 486 | def search(self, tmpl, query): |
|
474 | 487 | |
|
475 | 488 | def changelist(**map): |
|
476 | 489 | cl = self.repo.changelog |
|
477 | 490 | count = 0 |
|
478 | 491 | qw = query.lower().split() |
|
479 | 492 | |
|
480 | 493 | def revgen(): |
|
481 | 494 | for i in xrange(cl.count() - 1, 0, -100): |
|
482 | 495 | l = [] |
|
483 | 496 | for j in xrange(max(0, i - 100), i + 1): |
|
484 | 497 | ctx = self.repo.changectx(j) |
|
485 | 498 | l.append(ctx) |
|
486 | 499 | l.reverse() |
|
487 | 500 | for e in l: |
|
488 | 501 | yield e |
|
489 | 502 | |
|
490 | 503 | for ctx in revgen(): |
|
491 | 504 | miss = 0 |
|
492 | 505 | for q in qw: |
|
493 | 506 | if not (q in ctx.user().lower() or |
|
494 | 507 | q in ctx.description().lower() or |
|
495 | 508 | q in " ".join(ctx.files()).lower()): |
|
496 | 509 | miss = 1 |
|
497 | 510 | break |
|
498 | 511 | if miss: |
|
499 | 512 | continue |
|
500 | 513 | |
|
501 | 514 | count += 1 |
|
502 | 515 | n = ctx.node() |
|
503 | 516 | |
|
504 | 517 | yield tmpl('searchentry', |
|
505 | 518 | parity=parity.next(), |
|
506 | 519 | author=ctx.user(), |
|
507 | 520 | parent=self.siblings(ctx.parents()), |
|
508 | 521 | child=self.siblings(ctx.children()), |
|
509 | 522 | changelogtag=self.showtag("changelogtag",n), |
|
510 | 523 | desc=ctx.description(), |
|
511 | 524 | date=ctx.date(), |
|
512 | 525 | files=self.listfilediffs(tmpl, ctx.files(), n), |
|
513 | 526 | rev=ctx.rev(), |
|
514 | 527 | node=hex(n), |
|
515 | 528 | tags=self.nodetagsdict(n), |
|
516 | 529 | branches=self.nodebranchdict(ctx)) |
|
517 | 530 | |
|
518 | 531 | if count >= self.maxchanges: |
|
519 | 532 | break |
|
520 | 533 | |
|
521 | 534 | cl = self.repo.changelog |
|
522 | 535 | parity = paritygen(self.stripecount) |
|
523 | 536 | |
|
524 | 537 | return tmpl('search', |
|
525 | 538 | query=query, |
|
526 | 539 | node=hex(cl.tip()), |
|
527 | 540 | entries=changelist, |
|
528 | 541 | archives=self.archivelist("tip")) |
|
529 | 542 | |
|
530 | 543 | def changeset(self, tmpl, ctx): |
|
531 | 544 | n = ctx.node() |
|
532 | 545 | parents = ctx.parents() |
|
533 | 546 | p1 = parents[0].node() |
|
534 | 547 | |
|
535 | 548 | files = [] |
|
536 | 549 | parity = paritygen(self.stripecount) |
|
537 | 550 | for f in ctx.files(): |
|
538 | 551 | files.append(tmpl("filenodelink", |
|
539 | 552 | node=hex(n), file=f, |
|
540 | 553 | parity=parity.next())) |
|
541 | 554 | |
|
542 | 555 | def diff(**map): |
|
543 | 556 | yield self.diff(tmpl, p1, n, None) |
|
544 | 557 | |
|
545 | 558 | return tmpl('changeset', |
|
546 | 559 | diff=diff, |
|
547 | 560 | rev=ctx.rev(), |
|
548 | 561 | node=hex(n), |
|
549 | 562 | parent=self.siblings(parents), |
|
550 | 563 | child=self.siblings(ctx.children()), |
|
551 | 564 | changesettag=self.showtag("changesettag",n), |
|
552 | 565 | author=ctx.user(), |
|
553 | 566 | desc=ctx.description(), |
|
554 | 567 | date=ctx.date(), |
|
555 | 568 | files=files, |
|
556 | 569 | archives=self.archivelist(hex(n)), |
|
557 | 570 | tags=self.nodetagsdict(n), |
|
558 | 571 | branches=self.nodebranchdict(ctx)) |
|
559 | 572 | |
|
560 | 573 | def filelog(self, tmpl, fctx): |
|
561 | 574 | f = fctx.path() |
|
562 | 575 | fl = fctx.filelog() |
|
563 | 576 | count = fl.count() |
|
564 | 577 | pagelen = self.maxshortchanges |
|
565 | 578 | pos = fctx.filerev() |
|
566 | 579 | start = max(0, pos - pagelen + 1) |
|
567 | 580 | end = min(count, start + pagelen) |
|
568 | 581 | pos = end - 1 |
|
569 | 582 | parity = paritygen(self.stripecount, offset=start-end) |
|
570 | 583 | |
|
571 | 584 | def entries(limit=0, **map): |
|
572 | 585 | l = [] |
|
573 | 586 | |
|
574 | 587 | for i in xrange(start, end): |
|
575 | 588 | ctx = fctx.filectx(i) |
|
576 | 589 | n = fl.node(i) |
|
577 | 590 | |
|
578 | 591 | l.insert(0, {"parity": parity.next(), |
|
579 | 592 | "filerev": i, |
|
580 | 593 | "file": f, |
|
581 | 594 | "node": hex(ctx.node()), |
|
582 | 595 | "author": ctx.user(), |
|
583 | 596 | "date": ctx.date(), |
|
584 | 597 | "rename": self.renamelink(fl, n), |
|
585 | 598 | "parent": self.siblings(fctx.parents()), |
|
586 | 599 | "child": self.siblings(fctx.children()), |
|
587 | 600 | "desc": ctx.description()}) |
|
588 | 601 | |
|
589 | 602 | if limit > 0: |
|
590 | 603 | l = l[:limit] |
|
591 | 604 | |
|
592 | 605 | for e in l: |
|
593 | 606 | yield e |
|
594 | 607 | |
|
595 | 608 | nodefunc = lambda x: fctx.filectx(fileid=x) |
|
596 | 609 | nav = revnavgen(pos, pagelen, count, nodefunc) |
|
597 | 610 | return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav, |
|
598 | 611 | entries=lambda **x: entries(limit=0, **x), |
|
599 | 612 | latestentry=lambda **x: entries(limit=1, **x)) |
|
600 | 613 | |
|
601 | 614 | def filerevision(self, tmpl, fctx): |
|
602 | 615 | f = fctx.path() |
|
603 | 616 | text = fctx.data() |
|
604 | 617 | fl = fctx.filelog() |
|
605 | 618 | n = fctx.filenode() |
|
606 | 619 | parity = paritygen(self.stripecount) |
|
607 | 620 | |
|
608 | 621 | if util.binary(text): |
|
609 | 622 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' |
|
610 | 623 | text = '(binary:%s)' % mt |
|
611 | 624 | |
|
612 | 625 | def lines(): |
|
613 | 626 | for lineno, t in enumerate(text.splitlines(1)): |
|
614 | 627 | yield {"line": t, |
|
615 | 628 | "lineid": "l%d" % (lineno + 1), |
|
616 | 629 | "linenumber": "% 6d" % (lineno + 1), |
|
617 | 630 | "parity": parity.next()} |
|
618 | 631 | |
|
619 | 632 | return tmpl("filerevision", |
|
620 | 633 | file=f, |
|
621 | 634 | path=_up(f), |
|
622 | 635 | text=lines(), |
|
623 | 636 | rev=fctx.rev(), |
|
624 | 637 | node=hex(fctx.node()), |
|
625 | 638 | author=fctx.user(), |
|
626 | 639 | date=fctx.date(), |
|
627 | 640 | desc=fctx.description(), |
|
628 | 641 | parent=self.siblings(fctx.parents()), |
|
629 | 642 | child=self.siblings(fctx.children()), |
|
630 | 643 | rename=self.renamelink(fl, n), |
|
631 | 644 | permissions=fctx.manifest().flags(f)) |
|
632 | 645 | |
|
633 | 646 | def fileannotate(self, tmpl, fctx): |
|
634 | 647 | f = fctx.path() |
|
635 | 648 | n = fctx.filenode() |
|
636 | 649 | fl = fctx.filelog() |
|
637 | 650 | parity = paritygen(self.stripecount) |
|
638 | 651 | |
|
639 | 652 | def annotate(**map): |
|
640 | 653 | last = None |
|
641 | 654 | lines = enumerate(fctx.annotate(follow=True, linenumber=True)) |
|
642 | 655 | for lineno, ((f, targetline), l) in lines: |
|
643 | 656 | fnode = f.filenode() |
|
644 | 657 | name = self.repo.ui.shortuser(f.user()) |
|
645 | 658 | |
|
646 | 659 | if last != fnode: |
|
647 | 660 | last = fnode |
|
648 | 661 | |
|
649 | 662 | yield {"parity": parity.next(), |
|
650 | 663 | "node": hex(f.node()), |
|
651 | 664 | "rev": f.rev(), |
|
652 | 665 | "author": name, |
|
653 | 666 | "file": f.path(), |
|
654 | 667 | "targetline": targetline, |
|
655 | 668 | "line": l, |
|
656 | 669 | "lineid": "l%d" % (lineno + 1), |
|
657 | 670 | "linenumber": "% 6d" % (lineno + 1)} |
|
658 | 671 | |
|
659 | 672 | return tmpl("fileannotate", |
|
660 | 673 | file=f, |
|
661 | 674 | annotate=annotate, |
|
662 | 675 | path=_up(f), |
|
663 | 676 | rev=fctx.rev(), |
|
664 | 677 | node=hex(fctx.node()), |
|
665 | 678 | author=fctx.user(), |
|
666 | 679 | date=fctx.date(), |
|
667 | 680 | desc=fctx.description(), |
|
668 | 681 | rename=self.renamelink(fl, n), |
|
669 | 682 | parent=self.siblings(fctx.parents()), |
|
670 | 683 | child=self.siblings(fctx.children()), |
|
671 | 684 | permissions=fctx.manifest().flags(f)) |
|
672 | 685 | |
|
673 | 686 | def manifest(self, tmpl, ctx, path): |
|
674 | 687 | mf = ctx.manifest() |
|
675 | 688 | node = ctx.node() |
|
676 | 689 | |
|
677 | 690 | files = {} |
|
678 | 691 | parity = paritygen(self.stripecount) |
|
679 | 692 | |
|
680 | 693 | if path and path[-1] != "/": |
|
681 | 694 | path += "/" |
|
682 | 695 | l = len(path) |
|
683 | 696 | abspath = "/" + path |
|
684 | 697 | |
|
685 | 698 | for f, n in mf.items(): |
|
686 | 699 | if f[:l] != path: |
|
687 | 700 | continue |
|
688 | 701 | remain = f[l:] |
|
689 | 702 | if "/" in remain: |
|
690 | 703 | short = remain[:remain.index("/") + 1] # bleah |
|
691 | 704 | files[short] = (f, None) |
|
692 | 705 | else: |
|
693 | 706 | short = os.path.basename(remain) |
|
694 | 707 | files[short] = (f, n) |
|
695 | 708 | |
|
696 | 709 | if not files: |
|
697 | 710 | raise ErrorResponse(HTTP_NOT_FOUND, 'Path not found: ' + path) |
|
698 | 711 | |
|
699 | 712 | def filelist(**map): |
|
700 | 713 | fl = files.keys() |
|
701 | 714 | fl.sort() |
|
702 | 715 | for f in fl: |
|
703 | 716 | full, fnode = files[f] |
|
704 | 717 | if not fnode: |
|
705 | 718 | continue |
|
706 | 719 | |
|
707 | 720 | fctx = ctx.filectx(full) |
|
708 | 721 | yield {"file": full, |
|
709 | 722 | "parity": parity.next(), |
|
710 | 723 | "basename": f, |
|
711 | 724 | "date": fctx.changectx().date(), |
|
712 | 725 | "size": fctx.size(), |
|
713 | 726 | "permissions": mf.flags(full)} |
|
714 | 727 | |
|
715 | 728 | def dirlist(**map): |
|
716 | 729 | fl = files.keys() |
|
717 | 730 | fl.sort() |
|
718 | 731 | for f in fl: |
|
719 | 732 | full, fnode = files[f] |
|
720 | 733 | if fnode: |
|
721 | 734 | continue |
|
722 | 735 | |
|
723 | 736 | yield {"parity": parity.next(), |
|
724 | 737 | "path": "%s%s" % (abspath, f), |
|
725 | 738 | "basename": f[:-1]} |
|
726 | 739 | |
|
727 | 740 | return tmpl("manifest", |
|
728 | 741 | rev=ctx.rev(), |
|
729 | 742 | node=hex(node), |
|
730 | 743 | path=abspath, |
|
731 | 744 | up=_up(abspath), |
|
732 | 745 | upparity=parity.next(), |
|
733 | 746 | fentries=filelist, |
|
734 | 747 | dentries=dirlist, |
|
735 | 748 | archives=self.archivelist(hex(node)), |
|
736 | 749 | tags=self.nodetagsdict(node), |
|
737 | 750 | branches=self.nodebranchdict(ctx)) |
|
738 | 751 | |
|
739 | 752 | def tags(self, tmpl): |
|
740 | 753 | i = self.repo.tagslist() |
|
741 | 754 | i.reverse() |
|
742 | 755 | parity = paritygen(self.stripecount) |
|
743 | 756 | |
|
744 | 757 | def entries(notip=False,limit=0, **map): |
|
745 | 758 | count = 0 |
|
746 | 759 | for k, n in i: |
|
747 | 760 | if notip and k == "tip": |
|
748 | 761 | continue |
|
749 | 762 | if limit > 0 and count >= limit: |
|
750 | 763 | continue |
|
751 | 764 | count = count + 1 |
|
752 | 765 | yield {"parity": parity.next(), |
|
753 | 766 | "tag": k, |
|
754 | 767 | "date": self.repo.changectx(n).date(), |
|
755 | 768 | "node": hex(n)} |
|
756 | 769 | |
|
757 | 770 | return tmpl("tags", |
|
758 | 771 | node=hex(self.repo.changelog.tip()), |
|
759 | 772 | entries=lambda **x: entries(False,0, **x), |
|
760 | 773 | entriesnotip=lambda **x: entries(True,0, **x), |
|
761 | 774 | latestentry=lambda **x: entries(True,1, **x)) |
|
762 | 775 | |
|
763 | 776 | def summary(self, tmpl): |
|
764 | 777 | i = self.repo.tagslist() |
|
765 | 778 | i.reverse() |
|
766 | 779 | |
|
767 | 780 | def tagentries(**map): |
|
768 | 781 | parity = paritygen(self.stripecount) |
|
769 | 782 | count = 0 |
|
770 | 783 | for k, n in i: |
|
771 | 784 | if k == "tip": # skip tip |
|
772 | 785 | continue; |
|
773 | 786 | |
|
774 | 787 | count += 1 |
|
775 | 788 | if count > 10: # limit to 10 tags |
|
776 | 789 | break; |
|
777 | 790 | |
|
778 | 791 | yield tmpl("tagentry", |
|
779 | 792 | parity=parity.next(), |
|
780 | 793 | tag=k, |
|
781 | 794 | node=hex(n), |
|
782 | 795 | date=self.repo.changectx(n).date()) |
|
783 | 796 | |
|
784 | 797 | |
|
785 | 798 | def branches(**map): |
|
786 | 799 | parity = paritygen(self.stripecount) |
|
787 | 800 | |
|
788 | 801 | b = self.repo.branchtags() |
|
789 | 802 | l = [(-self.repo.changelog.rev(n), n, t) for t, n in b.items()] |
|
790 | 803 | l.sort() |
|
791 | 804 | |
|
792 | 805 | for r,n,t in l: |
|
793 | 806 | ctx = self.repo.changectx(n) |
|
794 | 807 | |
|
795 | 808 | yield {'parity': parity.next(), |
|
796 | 809 | 'branch': t, |
|
797 | 810 | 'node': hex(n), |
|
798 | 811 | 'date': ctx.date()} |
|
799 | 812 | |
|
800 | 813 | def changelist(**map): |
|
801 | 814 | parity = paritygen(self.stripecount, offset=start-end) |
|
802 | 815 | l = [] # build a list in forward order for efficiency |
|
803 | 816 | for i in xrange(start, end): |
|
804 | 817 | ctx = self.repo.changectx(i) |
|
805 | 818 | n = ctx.node() |
|
806 | 819 | hn = hex(n) |
|
807 | 820 | |
|
808 | 821 | l.insert(0, tmpl( |
|
809 | 822 | 'shortlogentry', |
|
810 | 823 | parity=parity.next(), |
|
811 | 824 | author=ctx.user(), |
|
812 | 825 | desc=ctx.description(), |
|
813 | 826 | date=ctx.date(), |
|
814 | 827 | rev=i, |
|
815 | 828 | node=hn, |
|
816 | 829 | tags=self.nodetagsdict(n), |
|
817 | 830 | branches=self.nodebranchdict(ctx))) |
|
818 | 831 | |
|
819 | 832 | yield l |
|
820 | 833 | |
|
821 | 834 | cl = self.repo.changelog |
|
822 | 835 | count = cl.count() |
|
823 | 836 | start = max(0, count - self.maxchanges) |
|
824 | 837 | end = min(count, start + self.maxchanges) |
|
825 | 838 | |
|
826 | 839 | return tmpl("summary", |
|
827 | 840 | desc=self.config("web", "description", "unknown"), |
|
828 | 841 | owner=get_contact(self.config) or "unknown", |
|
829 | 842 | lastchange=cl.read(cl.tip())[2], |
|
830 | 843 | tags=tagentries, |
|
831 | 844 | branches=branches, |
|
832 | 845 | shortlog=changelist, |
|
833 | 846 | node=hex(cl.tip()), |
|
834 | 847 | archives=self.archivelist("tip")) |
|
835 | 848 | |
|
836 | 849 | def filediff(self, tmpl, fctx): |
|
837 | 850 | n = fctx.node() |
|
838 | 851 | path = fctx.path() |
|
839 | 852 | parents = fctx.parents() |
|
840 | 853 | p1 = parents and parents[0].node() or nullid |
|
841 | 854 | |
|
842 | 855 | def diff(**map): |
|
843 | 856 | yield self.diff(tmpl, p1, n, [path]) |
|
844 | 857 | |
|
845 | 858 | return tmpl("filediff", |
|
846 | 859 | file=path, |
|
847 | 860 | node=hex(n), |
|
848 | 861 | rev=fctx.rev(), |
|
849 | 862 | parent=self.siblings(parents), |
|
850 | 863 | child=self.siblings(fctx.children()), |
|
851 | 864 | diff=diff) |
|
852 | 865 | |
|
853 | 866 | archive_specs = { |
|
854 | 867 | 'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None), |
|
855 | 868 | 'gz': ('application/x-tar', 'tgz', '.tar.gz', None), |
|
856 | 869 | 'zip': ('application/zip', 'zip', '.zip', None), |
|
857 | 870 | } |
|
858 | 871 | |
|
859 | 872 | def archive(self, tmpl, req, key, type_): |
|
860 | 873 | reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame)) |
|
861 | 874 | cnode = self.repo.lookup(key) |
|
862 | 875 | arch_version = key |
|
863 | 876 | if cnode == key or key == 'tip': |
|
864 | 877 | arch_version = short(cnode) |
|
865 | 878 | name = "%s-%s" % (reponame, arch_version) |
|
866 | 879 | mimetype, artype, extension, encoding = self.archive_specs[type_] |
|
867 | 880 | headers = [ |
|
868 | 881 | ('Content-Type', mimetype), |
|
869 | 882 | ('Content-Disposition', 'attachment; filename=%s%s' % |
|
870 | 883 | (name, extension)) |
|
871 | 884 | ] |
|
872 | 885 | if encoding: |
|
873 | 886 | headers.append(('Content-Encoding', encoding)) |
|
874 | 887 | req.header(headers) |
|
875 | 888 | req.respond(HTTP_OK) |
|
876 | 889 | archival.archive(self.repo, req, cnode, artype, prefix=name) |
|
877 | 890 | |
|
878 | 891 | # add tags to things |
|
879 | 892 | # tags -> list of changesets corresponding to tags |
|
880 | 893 | # find tag, changeset, file |
|
881 | 894 | |
|
882 | 895 | def cleanpath(self, path): |
|
883 | 896 | path = path.lstrip('/') |
|
884 | 897 | return util.canonpath(self.repo.root, '', path) |
|
885 | 898 | |
|
886 | 899 | def changectx(self, req): |
|
887 | 900 | if 'node' in req.form: |
|
888 | 901 | changeid = req.form['node'][0] |
|
889 | 902 | elif 'manifest' in req.form: |
|
890 | 903 | changeid = req.form['manifest'][0] |
|
891 | 904 | else: |
|
892 | 905 | changeid = self.repo.changelog.count() - 1 |
|
893 | 906 | |
|
894 | 907 | try: |
|
895 | 908 | ctx = self.repo.changectx(changeid) |
|
896 | 909 | except hg.RepoError: |
|
897 | 910 | man = self.repo.manifest |
|
898 | 911 | mn = man.lookup(changeid) |
|
899 | 912 | ctx = self.repo.changectx(man.linkrev(mn)) |
|
900 | 913 | |
|
901 | 914 | return ctx |
|
902 | 915 | |
|
903 | 916 | def filectx(self, req): |
|
904 | 917 | path = self.cleanpath(req.form['file'][0]) |
|
905 | 918 | if 'node' in req.form: |
|
906 | 919 | changeid = req.form['node'][0] |
|
907 | 920 | else: |
|
908 | 921 | changeid = req.form['filenode'][0] |
|
909 | 922 | try: |
|
910 | 923 | ctx = self.repo.changectx(changeid) |
|
911 | 924 | fctx = ctx.filectx(path) |
|
912 | 925 | except hg.RepoError: |
|
913 | 926 | fctx = self.repo.filectx(path, fileid=changeid) |
|
914 | 927 | |
|
915 | 928 | return fctx |
|
916 | 929 | |
|
917 | 930 | def check_perm(self, req, op, default): |
|
918 | 931 | '''check permission for operation based on user auth. |
|
919 | 932 | return true if op allowed, else false. |
|
920 | 933 | default is policy to use if no config given.''' |
|
921 | 934 | |
|
922 | 935 | user = req.env.get('REMOTE_USER') |
|
923 | 936 | |
|
924 | 937 | deny = self.configlist('web', 'deny_' + op) |
|
925 | 938 | if deny and (not user or deny == ['*'] or user in deny): |
|
926 | 939 | return False |
|
927 | 940 | |
|
928 | 941 | allow = self.configlist('web', 'allow_' + op) |
|
929 | 942 | return (allow and (allow == ['*'] or user in allow)) or default |
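
The new capabilities() method above memoizes its result in self._capabilities, and refresh() clears that cache when the repository's mtime changes, so settings such as server.uncompressed are re-read at most once per reload. A stripped-down sketch of the same cache-and-invalidate pattern, independent of hgweb (the class and attribute names only mirror the patch; the hard-coded capability values are assumptions for illustration):

class capcache(object):
    """Standalone illustration of the caching pattern used by hgweb."""

    def __init__(self):
        self._capabilities = None      # filled lazily on first request

    def refresh(self):
        # hgweb.refresh() does this when the repo has changed on disk,
        # forcing the next capabilities() call to rebuild the list.
        self._capabilities = None

    def capabilities(self):
        if self._capabilities is not None:
            return self._capabilities
        caps = ['lookup', 'changegroupsubset']
        caps.append('unbundle=%s' % ','.join(['HG10GZ', 'HG10BZ', 'HG10UN']))
        self._capabilities = caps
        return caps
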
@@ -1,250 +1,243 b'' | |||
|
1 | 1 | # |
|
2 | 2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
3 | 3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms |
|
6 | 6 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | 7 | |
|
8 | 8 | import cStringIO, zlib, bz2, tempfile, errno, os, sys |
|
9 | 9 | from mercurial import util, streamclone |
|
10 | 10 | from mercurial.i18n import gettext as _ |
|
11 | 11 | from mercurial.node import * |
|
12 | 12 | from common import HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
13 | 13 | |
|
14 | 14 | # __all__ is populated with the allowed commands. Be sure to add to it if |
|
15 | 15 | # you're adding a new command, or the new command won't work. |
|
16 | 16 | |
|
17 | 17 | __all__ = [ |
|
18 | 18 | 'lookup', 'heads', 'branches', 'between', 'changegroup', |
|
19 | 19 | 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out', |
|
20 | 20 | ] |
|
21 | 21 | |
|
22 | 22 | HGTYPE = 'application/mercurial-0.1' |
|
23 | 23 | |
|
24 | 24 | def lookup(web, req): |
|
25 | 25 | try: |
|
26 | 26 | r = hex(web.repo.lookup(req.form['key'][0])) |
|
27 | 27 | success = 1 |
|
28 | 28 | except Exception,inst: |
|
29 | 29 | r = str(inst) |
|
30 | 30 | success = 0 |
|
31 | 31 | resp = "%s %s\n" % (success, r) |
|
32 | 32 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
33 | 33 | req.write(resp) |
|
34 | 34 | |
|
35 | 35 | def heads(web, req): |
|
36 | 36 | resp = " ".join(map(hex, web.repo.heads())) + "\n" |
|
37 | 37 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
38 | 38 | req.write(resp) |
|
39 | 39 | |
|
40 | 40 | def branches(web, req): |
|
41 | 41 | nodes = [] |
|
42 | 42 | if 'nodes' in req.form: |
|
43 | 43 | nodes = map(bin, req.form['nodes'][0].split(" ")) |
|
44 | 44 | resp = cStringIO.StringIO() |
|
45 | 45 | for b in web.repo.branches(nodes): |
|
46 | 46 | resp.write(" ".join(map(hex, b)) + "\n") |
|
47 | 47 | resp = resp.getvalue() |
|
48 | 48 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
49 | 49 | req.write(resp) |
|
50 | 50 | |
|
51 | 51 | def between(web, req): |
|
52 | 52 | if 'pairs' in req.form: |
|
53 | 53 | pairs = [map(bin, p.split("-")) |
|
54 | 54 | for p in req.form['pairs'][0].split(" ")] |
|
55 | 55 | resp = cStringIO.StringIO() |
|
56 | 56 | for b in web.repo.between(pairs): |
|
57 | 57 | resp.write(" ".join(map(hex, b)) + "\n") |
|
58 | 58 | resp = resp.getvalue() |
|
59 | 59 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
60 | 60 | req.write(resp) |
|
61 | 61 | |
|
62 | 62 | def changegroup(web, req): |
|
63 | 63 | req.respond(HTTP_OK, HGTYPE) |
|
64 | 64 | nodes = [] |
|
65 | 65 | if not web.allowpull: |
|
66 | 66 | return |
|
67 | 67 | |
|
68 | 68 | if 'roots' in req.form: |
|
69 | 69 | nodes = map(bin, req.form['roots'][0].split(" ")) |
|
70 | 70 | |
|
71 | 71 | z = zlib.compressobj() |
|
72 | 72 | f = web.repo.changegroup(nodes, 'serve') |
|
73 | 73 | while 1: |
|
74 | 74 | chunk = f.read(4096) |
|
75 | 75 | if not chunk: |
|
76 | 76 | break |
|
77 | 77 | req.write(z.compress(chunk)) |
|
78 | 78 | |
|
79 | 79 | req.write(z.flush()) |
|
80 | 80 | |
|
81 | 81 | def changegroupsubset(web, req): |
|
82 | 82 | req.respond(HTTP_OK, HGTYPE) |
|
83 | 83 | bases = [] |
|
84 | 84 | heads = [] |
|
85 | 85 | if not web.allowpull: |
|
86 | 86 | return |
|
87 | 87 | |
|
88 | 88 | if 'bases' in req.form: |
|
89 | 89 | bases = [bin(x) for x in req.form['bases'][0].split(' ')] |
|
90 | 90 | if 'heads' in req.form: |
|
91 | 91 | heads = [bin(x) for x in req.form['heads'][0].split(' ')] |
|
92 | 92 | |
|
93 | 93 | z = zlib.compressobj() |
|
94 | 94 | f = web.repo.changegroupsubset(bases, heads, 'serve') |
|
95 | 95 | while 1: |
|
96 | 96 | chunk = f.read(4096) |
|
97 | 97 | if not chunk: |
|
98 | 98 | break |
|
99 | 99 | req.write(z.compress(chunk)) |
|
100 | 100 | |
|
101 | 101 | req.write(z.flush()) |
|
102 | 102 | |
|
103 | 103 | def capabilities(web, req): |
|
104 | caps = ['lookup', 'changegroupsubset'] | |
|
105 | if web.configbool('server', 'uncompressed'): | |
|
106 | caps.append('stream=%d' % web.repo.changelog.version) | |
|
107 | # XXX: make configurable and/or share code with do_unbundle: | |
|
108 | unbundleversions = ['HG10GZ', 'HG10BZ', 'HG10UN'] | |
|
109 | if unbundleversions: | |
|
110 | caps.append('unbundle=%s' % ','.join(unbundleversions)) | |
|
111 | resp = ' '.join(caps) | |
|
104 | resp = ' '.join(web.capabilities()) | |
|
112 | 105 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
113 | 106 | req.write(resp) |
|
114 | 107 | |
|
115 | 108 | def unbundle(web, req): |
|
116 | 109 | def bail(response, headers={}): |
|
117 | 110 | length = int(req.env['CONTENT_LENGTH']) |
|
118 | 111 | for s in util.filechunkiter(req, limit=length): |
|
119 | 112 | # drain incoming bundle, else client will not see |
|
120 | 113 | # response when run outside cgi script |
|
121 | 114 | pass |
|
122 | 115 | req.header(headers.items()) |
|
123 | 116 | req.respond(HTTP_OK, HGTYPE) |
|
124 | 117 | req.write('0\n') |
|
125 | 118 | req.write(response) |
|
126 | 119 | |
|
127 | 120 | # require ssl by default, auth info cannot be sniffed and |
|
128 | 121 | # replayed |
|
129 | 122 | ssl_req = web.configbool('web', 'push_ssl', True) |
|
130 | 123 | if ssl_req: |
|
131 | 124 | if req.env.get('wsgi.url_scheme') != 'https': |
|
132 | 125 | bail(_('ssl required\n')) |
|
133 | 126 | return |
|
134 | 127 | proto = 'https' |
|
135 | 128 | else: |
|
136 | 129 | proto = 'http' |
|
137 | 130 | |
|
138 | 131 | # do not allow push unless explicitly allowed |
|
139 | 132 | if not web.check_perm(req, 'push', False): |
|
140 | 133 | bail(_('push not authorized\n'), |
|
141 | 134 | headers={'status': '401 Unauthorized'}) |
|
142 | 135 | return |
|
143 | 136 | |
|
144 | 137 | their_heads = req.form['heads'][0].split(' ') |
|
145 | 138 | |
|
146 | 139 | def check_heads(): |
|
147 | 140 | heads = map(hex, web.repo.heads()) |
|
148 | 141 | return their_heads == [hex('force')] or their_heads == heads |
|
149 | 142 | |
|
150 | 143 | # fail early if possible |
|
151 | 144 | if not check_heads(): |
|
152 | 145 | bail(_('unsynced changes\n')) |
|
153 | 146 | return |
|
154 | 147 | |
|
155 | 148 | req.respond(HTTP_OK, HGTYPE) |
|
156 | 149 | |
|
157 | 150 | # do not lock repo until all changegroup data is |
|
158 | 151 | # streamed. save to temporary file. |
|
159 | 152 | |
|
160 | 153 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') |
|
161 | 154 | fp = os.fdopen(fd, 'wb+') |
|
162 | 155 | try: |
|
163 | 156 | length = int(req.env['CONTENT_LENGTH']) |
|
164 | 157 | for s in util.filechunkiter(req, limit=length): |
|
165 | 158 | fp.write(s) |
|
166 | 159 | |
|
167 | 160 | try: |
|
168 | 161 | lock = web.repo.lock() |
|
169 | 162 | try: |
|
170 | 163 | if not check_heads(): |
|
171 | 164 | req.write('0\n') |
|
172 | 165 | req.write(_('unsynced changes\n')) |
|
173 | 166 | return |
|
174 | 167 | |
|
175 | 168 | fp.seek(0) |
|
176 | 169 | header = fp.read(6) |
|
177 | 170 | if not header.startswith("HG"): |
|
178 | 171 | # old client with uncompressed bundle |
|
179 | 172 | def generator(f): |
|
180 | 173 | yield header |
|
181 | 174 | for chunk in f: |
|
182 | 175 | yield chunk |
|
183 | 176 | elif not header.startswith("HG10"): |
|
184 | 177 | req.write("0\n") |
|
185 | 178 | req.write(_("unknown bundle version\n")) |
|
186 | 179 | return |
|
187 | 180 | elif header == "HG10GZ": |
|
188 | 181 | def generator(f): |
|
189 | 182 | zd = zlib.decompressobj() |
|
190 | 183 | for chunk in f: |
|
191 | 184 | yield zd.decompress(chunk) |
|
192 | 185 | elif header == "HG10BZ": |
|
193 | 186 | def generator(f): |
|
194 | 187 | zd = bz2.BZ2Decompressor() |
|
195 | 188 | zd.decompress("BZ") |
|
196 | 189 | for chunk in f: |
|
197 | 190 | yield zd.decompress(chunk) |
|
198 | 191 | elif header == "HG10UN": |
|
199 | 192 | def generator(f): |
|
200 | 193 | for chunk in f: |
|
201 | 194 | yield chunk |
|
202 | 195 | else: |
|
203 | 196 | req.write("0\n") |
|
204 | 197 | req.write(_("unknown bundle compression type\n")) |
|
205 | 198 | return |
|
206 | 199 | gen = generator(util.filechunkiter(fp, 4096)) |
|
207 | 200 | |
|
208 | 201 | # send addchangegroup output to client |
|
209 | 202 | |
|
210 | 203 | old_stdout = sys.stdout |
|
211 | 204 | sys.stdout = cStringIO.StringIO() |
|
212 | 205 | |
|
213 | 206 | try: |
|
214 | 207 | url = 'remote:%s:%s' % (proto, |
|
215 | 208 | req.env.get('REMOTE_HOST', '')) |
|
216 | 209 | try: |
|
217 | 210 | ret = web.repo.addchangegroup( |
|
218 | 211 | util.chunkbuffer(gen), 'serve', url) |
|
219 | 212 | except util.Abort, inst: |
|
220 | 213 | sys.stdout.write("abort: %s\n" % inst) |
|
221 | 214 | ret = 0 |
|
222 | 215 | finally: |
|
223 | 216 | val = sys.stdout.getvalue() |
|
224 | 217 | sys.stdout = old_stdout |
|
225 | 218 | req.write('%d\n' % ret) |
|
226 | 219 | req.write(val) |
|
227 | 220 | finally: |
|
228 | 221 | del lock |
|
229 | 222 | except (OSError, IOError), inst: |
|
230 | 223 | req.write('0\n') |
|
231 | 224 | filename = getattr(inst, 'filename', '') |
|
232 | 225 | # Don't send our filesystem layout to the client |
|
233 | 226 | if filename.startswith(web.repo.root): |
|
234 | 227 | filename = filename[len(web.repo.root)+1:] |
|
235 | 228 | else: |
|
236 | 229 | filename = '' |
|
237 | 230 | error = getattr(inst, 'strerror', 'Unknown error') |
|
238 | 231 | if inst.errno == errno.ENOENT: |
|
239 | 232 | code = HTTP_NOT_FOUND |
|
240 | 233 | else: |
|
241 | 234 | code = HTTP_SERVER_ERROR |
|
242 | 235 | req.respond(code) |
|
243 | 236 | req.write('%s: %s\n' % (error, filename)) |
|
244 | 237 | finally: |
|
245 | 238 | fp.close() |
|
246 | 239 | os.unlink(tempname) |
|
247 | 240 | |
|
248 | 241 | def stream_out(web, req): |
|
249 | 242 | req.respond(HTTP_OK, HGTYPE) |
|
250 | 243 | streamclone.stream_out(web.repo, req, untrusted=True) |
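
With the change above, the protocol handler simply joins web.capabilities() with spaces, so the wire response is a string such as 'lookup changegroupsubset unbundle=HG10GZ,HG10BZ,HG10UN'. A hedged sketch of how a client could split that back into a capability map (parsecaps is a hypothetical helper, not a Mercurial API):

def parsecaps(resp):
    # Tokens are space separated; 'key=value' carries a comma-separated list.
    caps = {}
    for token in resp.split():
        if '=' in token:
            key, value = token.split('=', 1)
            caps[key] = value.split(',')
        else:
            caps[token] = True
    return caps

caps = parsecaps('lookup changegroupsubset unbundle=HG10GZ,HG10BZ,HG10UN')
assert caps['lookup'] is True
assert caps['unbundle'] == ['HG10GZ', 'HG10BZ', 'HG10UN']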