Show More
@@ -1,126 +1,129 b'' | |||||
1 | """ |
|
1 | """ | |
2 | changegroup.py - Mercurial changegroup manipulation functions |
|
2 | changegroup.py - Mercurial changegroup manipulation functions | |
3 |
|
3 | |||
4 | Copyright 2006 Matt Mackall <mpm@selenic.com> |
|
4 | Copyright 2006 Matt Mackall <mpm@selenic.com> | |
5 |
|
5 | |||
6 | This software may be used and distributed according to the terms |
|
6 | This software may be used and distributed according to the terms | |
7 | of the GNU General Public License, incorporated herein by reference. |
|
7 | of the GNU General Public License, incorporated herein by reference. | |
8 | """ |
|
8 | """ | |
9 |
|
9 | |||
10 | from i18n import _ |
|
10 | from i18n import _ | |
11 | import struct, os, bz2, zlib, util, tempfile |
|
11 | import struct, os, bz2, zlib, util, tempfile | |
12 |
|
12 | |||
13 | def getchunk(source): |
|
13 | def getchunk(source): | |
14 | """get a chunk from a changegroup""" |
|
14 | """get a chunk from a changegroup""" | |
15 | d = source.read(4) |
|
15 | d = source.read(4) | |
16 | if not d: |
|
16 | if not d: | |
17 | return "" |
|
17 | return "" | |
18 | l = struct.unpack(">l", d)[0] |
|
18 | l = struct.unpack(">l", d)[0] | |
19 | if l <= 4: |
|
19 | if l <= 4: | |
20 | return "" |
|
20 | return "" | |
21 | d = source.read(l - 4) |
|
21 | d = source.read(l - 4) | |
22 | if len(d) < l - 4: |
|
22 | if len(d) < l - 4: | |
23 | raise util.Abort(_("premature EOF reading chunk" |
|
23 | raise util.Abort(_("premature EOF reading chunk" | |
24 | " (got %d bytes, expected %d)") |
|
24 | " (got %d bytes, expected %d)") | |
25 | % (len(d), l - 4)) |
|
25 | % (len(d), l - 4)) | |
26 | return d |
|
26 | return d | |
27 |
|
27 | |||
28 | def chunkiter(source): |
|
28 | def chunkiter(source): | |
29 | """iterate through the chunks in source""" |
|
29 | """iterate through the chunks in source""" | |
30 | while 1: |
|
30 | while 1: | |
31 | c = getchunk(source) |
|
31 | c = getchunk(source) | |
32 | if not c: |
|
32 | if not c: | |
33 | break |
|
33 | break | |
34 | yield c |
|
34 | yield c | |
35 |
|
35 | |||
36 | def chunkheader(length): |
|
36 | def chunkheader(length): | |
37 | """build a changegroup chunk header""" |
|
37 | """build a changegroup chunk header""" | |
38 | return struct.pack(">l", length + 4) |
|
38 | return struct.pack(">l", length + 4) | |
39 |
|
39 | |||
40 | def closechunk(): |
|
40 | def closechunk(): | |
41 | return struct.pack(">l", 0) |
|
41 | return struct.pack(">l", 0) | |
42 |
|
42 | |||
43 | class nocompress(object): |
|
43 | class nocompress(object): | |
44 | def compress(self, x): |
|
44 | def compress(self, x): | |
45 | return x |
|
45 | return x | |
46 | def flush(self): |
|
46 | def flush(self): | |
47 | return "" |
|
47 | return "" | |
48 |
|
48 | |||
49 | bundletypes = { |
|
49 | bundletypes = { | |
50 | "": ("", nocompress), |
|
50 | "": ("", nocompress), | |
51 | "HG10UN": ("HG10UN", nocompress), |
|
51 | "HG10UN": ("HG10UN", nocompress), | |
52 | "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()), |
|
52 | "HG10BZ": ("HG10", lambda: bz2.BZ2Compressor()), | |
53 | "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), |
|
53 | "HG10GZ": ("HG10GZ", lambda: zlib.compressobj()), | |
54 | } |
|
54 | } | |
55 |
|
55 | |||
|
56 | # hgweb uses this list to communicate it's preferred type | |||
|
57 | bundlepriority = ['HG10GZ', 'HG10BZ', 'HG10UN'] | |||
|
58 | ||||
56 | def writebundle(cg, filename, bundletype): |
|
59 | def writebundle(cg, filename, bundletype): | |
57 | """Write a bundle file and return its filename. |
|
60 | """Write a bundle file and return its filename. | |
58 |
|
61 | |||
59 | Existing files will not be overwritten. |
|
62 | Existing files will not be overwritten. | |
60 | If no filename is specified, a temporary file is created. |
|
63 | If no filename is specified, a temporary file is created. | |
61 | bz2 compression can be turned off. |
|
64 | bz2 compression can be turned off. | |
62 | The bundle file will be deleted in case of errors. |
|
65 | The bundle file will be deleted in case of errors. | |
63 | """ |
|
66 | """ | |
64 |
|
67 | |||
65 | fh = None |
|
68 | fh = None | |
66 | cleanup = None |
|
69 | cleanup = None | |
67 | try: |
|
70 | try: | |
68 | if filename: |
|
71 | if filename: | |
69 | fh = open(filename, "wb") |
|
72 | fh = open(filename, "wb") | |
70 | else: |
|
73 | else: | |
71 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") |
|
74 | fd, filename = tempfile.mkstemp(prefix="hg-bundle-", suffix=".hg") | |
72 | fh = os.fdopen(fd, "wb") |
|
75 | fh = os.fdopen(fd, "wb") | |
73 | cleanup = filename |
|
76 | cleanup = filename | |
74 |
|
77 | |||
75 | header, compressor = bundletypes[bundletype] |
|
78 | header, compressor = bundletypes[bundletype] | |
76 | fh.write(header) |
|
79 | fh.write(header) | |
77 | z = compressor() |
|
80 | z = compressor() | |
78 |
|
81 | |||
79 | # parse the changegroup data, otherwise we will block |
|
82 | # parse the changegroup data, otherwise we will block | |
80 | # in case of sshrepo because we don't know the end of the stream |
|
83 | # in case of sshrepo because we don't know the end of the stream | |
81 |
|
84 | |||
82 | # an empty chunkiter is the end of the changegroup |
|
85 | # an empty chunkiter is the end of the changegroup | |
83 | # a changegroup has at least 2 chunkiters (changelog and manifest). |
|
86 | # a changegroup has at least 2 chunkiters (changelog and manifest). | |
84 | # after that, an empty chunkiter is the end of the changegroup |
|
87 | # after that, an empty chunkiter is the end of the changegroup | |
85 | empty = False |
|
88 | empty = False | |
86 | count = 0 |
|
89 | count = 0 | |
87 | while not empty or count <= 2: |
|
90 | while not empty or count <= 2: | |
88 | empty = True |
|
91 | empty = True | |
89 | count += 1 |
|
92 | count += 1 | |
90 | for chunk in chunkiter(cg): |
|
93 | for chunk in chunkiter(cg): | |
91 | empty = False |
|
94 | empty = False | |
92 | fh.write(z.compress(chunkheader(len(chunk)))) |
|
95 | fh.write(z.compress(chunkheader(len(chunk)))) | |
93 | pos = 0 |
|
96 | pos = 0 | |
94 | while pos < len(chunk): |
|
97 | while pos < len(chunk): | |
95 | next = pos + 2**20 |
|
98 | next = pos + 2**20 | |
96 | fh.write(z.compress(chunk[pos:next])) |
|
99 | fh.write(z.compress(chunk[pos:next])) | |
97 | pos = next |
|
100 | pos = next | |
98 | fh.write(z.compress(closechunk())) |
|
101 | fh.write(z.compress(closechunk())) | |
99 | fh.write(z.flush()) |
|
102 | fh.write(z.flush()) | |
100 | cleanup = None |
|
103 | cleanup = None | |
101 | return filename |
|
104 | return filename | |
102 | finally: |
|
105 | finally: | |
103 | if fh is not None: |
|
106 | if fh is not None: | |
104 | fh.close() |
|
107 | fh.close() | |
105 | if cleanup is not None: |
|
108 | if cleanup is not None: | |
106 | os.unlink(cleanup) |
|
109 | os.unlink(cleanup) | |
107 |
|
110 | |||
108 | def readbundle(fh, fname): |
|
111 | def readbundle(fh, fname): | |
109 | header = fh.read(6) |
|
112 | header = fh.read(6) | |
110 | if not header.startswith("HG"): |
|
113 | if not header.startswith("HG"): | |
111 | raise util.Abort(_("%s: not a Mercurial bundle file") % fname) |
|
114 | raise util.Abort(_("%s: not a Mercurial bundle file") % fname) | |
112 | elif not header.startswith("HG10"): |
|
115 | elif not header.startswith("HG10"): | |
113 | raise util.Abort(_("%s: unknown bundle version") % fname) |
|
116 | raise util.Abort(_("%s: unknown bundle version") % fname) | |
114 |
|
117 | |||
115 | if header == "HG10BZ": |
|
118 | if header == "HG10BZ": | |
116 | def generator(f): |
|
119 | def generator(f): | |
117 | zd = bz2.BZ2Decompressor() |
|
120 | zd = bz2.BZ2Decompressor() | |
118 | zd.decompress("BZ") |
|
121 | zd.decompress("BZ") | |
119 | for chunk in util.filechunkiter(f, 4096): |
|
122 | for chunk in util.filechunkiter(f, 4096): | |
120 | yield zd.decompress(chunk) |
|
123 | yield zd.decompress(chunk) | |
121 | return util.chunkbuffer(generator(fh)) |
|
124 | return util.chunkbuffer(generator(fh)) | |
122 | elif header == "HG10UN": |
|
125 | elif header == "HG10UN": | |
123 | return fh |
|
126 | return fh | |
124 |
|
127 | |||
125 | raise util.Abort(_("%s: unknown bundle compression type") |
|
128 | raise util.Abort(_("%s: unknown bundle compression type") | |
126 | % fname) |
|
129 | % fname) |
@@ -1,929 +1,942 b'' | |||||
1 | # hgweb/hgweb_mod.py - Web interface for a repository. |
|
1 | # hgweb/hgweb_mod.py - Web interface for a repository. | |
2 | # |
|
2 | # | |
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
4 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms |
|
6 | # This software may be used and distributed according to the terms | |
7 | # of the GNU General Public License, incorporated herein by reference. |
|
7 | # of the GNU General Public License, incorporated herein by reference. | |
8 |
|
8 | |||
9 | import os, mimetypes, re |
|
9 | import os, mimetypes, re | |
10 | from mercurial.node import * |
|
10 | from mercurial.node import * | |
11 | from mercurial import mdiff, ui, hg, util, archival, patch, hook |
|
11 | from mercurial import mdiff, ui, hg, util, archival, patch, hook | |
12 | from mercurial import revlog, templater, templatefilters |
|
12 | from mercurial import revlog, templater, templatefilters, changegroup | |
13 | from common import get_mtime, style_map, paritygen, countgen, get_contact |
|
13 | from common import get_mtime, style_map, paritygen, countgen, get_contact | |
14 | from common import ErrorResponse |
|
14 | from common import ErrorResponse | |
15 | from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
15 | from common import HTTP_OK, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_SERVER_ERROR | |
16 | from request import wsgirequest |
|
16 | from request import wsgirequest | |
17 | import webcommands, protocol |
|
17 | import webcommands, protocol | |
18 |
|
18 | |||
19 | shortcuts = { |
|
19 | shortcuts = { | |
20 | 'cl': [('cmd', ['changelog']), ('rev', None)], |
|
20 | 'cl': [('cmd', ['changelog']), ('rev', None)], | |
21 | 'sl': [('cmd', ['shortlog']), ('rev', None)], |
|
21 | 'sl': [('cmd', ['shortlog']), ('rev', None)], | |
22 | 'cs': [('cmd', ['changeset']), ('node', None)], |
|
22 | 'cs': [('cmd', ['changeset']), ('node', None)], | |
23 | 'f': [('cmd', ['file']), ('filenode', None)], |
|
23 | 'f': [('cmd', ['file']), ('filenode', None)], | |
24 | 'fl': [('cmd', ['filelog']), ('filenode', None)], |
|
24 | 'fl': [('cmd', ['filelog']), ('filenode', None)], | |
25 | 'fd': [('cmd', ['filediff']), ('node', None)], |
|
25 | 'fd': [('cmd', ['filediff']), ('node', None)], | |
26 | 'fa': [('cmd', ['annotate']), ('filenode', None)], |
|
26 | 'fa': [('cmd', ['annotate']), ('filenode', None)], | |
27 | 'mf': [('cmd', ['manifest']), ('manifest', None)], |
|
27 | 'mf': [('cmd', ['manifest']), ('manifest', None)], | |
28 | 'ca': [('cmd', ['archive']), ('node', None)], |
|
28 | 'ca': [('cmd', ['archive']), ('node', None)], | |
29 | 'tags': [('cmd', ['tags'])], |
|
29 | 'tags': [('cmd', ['tags'])], | |
30 | 'tip': [('cmd', ['changeset']), ('node', ['tip'])], |
|
30 | 'tip': [('cmd', ['changeset']), ('node', ['tip'])], | |
31 | 'static': [('cmd', ['static']), ('file', None)] |
|
31 | 'static': [('cmd', ['static']), ('file', None)] | |
32 | } |
|
32 | } | |
33 |
|
33 | |||
34 | def _up(p): |
|
34 | def _up(p): | |
35 | if p[0] != "/": |
|
35 | if p[0] != "/": | |
36 | p = "/" + p |
|
36 | p = "/" + p | |
37 | if p[-1] == "/": |
|
37 | if p[-1] == "/": | |
38 | p = p[:-1] |
|
38 | p = p[:-1] | |
39 | up = os.path.dirname(p) |
|
39 | up = os.path.dirname(p) | |
40 | if up == "/": |
|
40 | if up == "/": | |
41 | return "/" |
|
41 | return "/" | |
42 | return up + "/" |
|
42 | return up + "/" | |
43 |
|
43 | |||
44 | def revnavgen(pos, pagelen, limit, nodefunc): |
|
44 | def revnavgen(pos, pagelen, limit, nodefunc): | |
45 | def seq(factor, limit=None): |
|
45 | def seq(factor, limit=None): | |
46 | if limit: |
|
46 | if limit: | |
47 | yield limit |
|
47 | yield limit | |
48 | if limit >= 20 and limit <= 40: |
|
48 | if limit >= 20 and limit <= 40: | |
49 | yield 50 |
|
49 | yield 50 | |
50 | else: |
|
50 | else: | |
51 | yield 1 * factor |
|
51 | yield 1 * factor | |
52 | yield 3 * factor |
|
52 | yield 3 * factor | |
53 | for f in seq(factor * 10): |
|
53 | for f in seq(factor * 10): | |
54 | yield f |
|
54 | yield f | |
55 |
|
55 | |||
56 | def nav(**map): |
|
56 | def nav(**map): | |
57 | l = [] |
|
57 | l = [] | |
58 | last = 0 |
|
58 | last = 0 | |
59 | for f in seq(1, pagelen): |
|
59 | for f in seq(1, pagelen): | |
60 | if f < pagelen or f <= last: |
|
60 | if f < pagelen or f <= last: | |
61 | continue |
|
61 | continue | |
62 | if f > limit: |
|
62 | if f > limit: | |
63 | break |
|
63 | break | |
64 | last = f |
|
64 | last = f | |
65 | if pos + f < limit: |
|
65 | if pos + f < limit: | |
66 | l.append(("+%d" % f, hex(nodefunc(pos + f).node()))) |
|
66 | l.append(("+%d" % f, hex(nodefunc(pos + f).node()))) | |
67 | if pos - f >= 0: |
|
67 | if pos - f >= 0: | |
68 | l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) |
|
68 | l.insert(0, ("-%d" % f, hex(nodefunc(pos - f).node()))) | |
69 |
|
69 | |||
70 | try: |
|
70 | try: | |
71 | yield {"label": "(0)", "node": hex(nodefunc('0').node())} |
|
71 | yield {"label": "(0)", "node": hex(nodefunc('0').node())} | |
72 |
|
72 | |||
73 | for label, node in l: |
|
73 | for label, node in l: | |
74 | yield {"label": label, "node": node} |
|
74 | yield {"label": label, "node": node} | |
75 |
|
75 | |||
76 | yield {"label": "tip", "node": "tip"} |
|
76 | yield {"label": "tip", "node": "tip"} | |
77 | except hg.RepoError: |
|
77 | except hg.RepoError: | |
78 | pass |
|
78 | pass | |
79 |
|
79 | |||
80 | return nav |
|
80 | return nav | |
81 |
|
81 | |||
82 | class hgweb(object): |
|
82 | class hgweb(object): | |
83 | def __init__(self, repo, name=None): |
|
83 | def __init__(self, repo, name=None): | |
84 | if isinstance(repo, str): |
|
84 | if isinstance(repo, str): | |
85 | parentui = ui.ui(report_untrusted=False, interactive=False) |
|
85 | parentui = ui.ui(report_untrusted=False, interactive=False) | |
86 | self.repo = hg.repository(parentui, repo) |
|
86 | self.repo = hg.repository(parentui, repo) | |
87 | else: |
|
87 | else: | |
88 | self.repo = repo |
|
88 | self.repo = repo | |
89 |
|
89 | |||
90 | hook.redirect(True) |
|
90 | hook.redirect(True) | |
91 | self.mtime = -1 |
|
91 | self.mtime = -1 | |
92 | self.reponame = name |
|
92 | self.reponame = name | |
93 | self.archives = 'zip', 'gz', 'bz2' |
|
93 | self.archives = 'zip', 'gz', 'bz2' | |
94 | self.stripecount = 1 |
|
94 | self.stripecount = 1 | |
|
95 | self._capabilities = None | |||
95 | # a repo owner may set web.templates in .hg/hgrc to get any file |
|
96 | # a repo owner may set web.templates in .hg/hgrc to get any file | |
96 | # readable by the user running the CGI script |
|
97 | # readable by the user running the CGI script | |
97 | self.templatepath = self.config("web", "templates", |
|
98 | self.templatepath = self.config("web", "templates", | |
98 | templater.templatepath(), |
|
99 | templater.templatepath(), | |
99 | untrusted=False) |
|
100 | untrusted=False) | |
100 |
|
101 | |||
101 | # The CGI scripts are often run by a user different from the repo owner. |
|
102 | # The CGI scripts are often run by a user different from the repo owner. | |
102 | # Trust the settings from the .hg/hgrc files by default. |
|
103 | # Trust the settings from the .hg/hgrc files by default. | |
103 | def config(self, section, name, default=None, untrusted=True): |
|
104 | def config(self, section, name, default=None, untrusted=True): | |
104 | return self.repo.ui.config(section, name, default, |
|
105 | return self.repo.ui.config(section, name, default, | |
105 | untrusted=untrusted) |
|
106 | untrusted=untrusted) | |
106 |
|
107 | |||
107 | def configbool(self, section, name, default=False, untrusted=True): |
|
108 | def configbool(self, section, name, default=False, untrusted=True): | |
108 | return self.repo.ui.configbool(section, name, default, |
|
109 | return self.repo.ui.configbool(section, name, default, | |
109 | untrusted=untrusted) |
|
110 | untrusted=untrusted) | |
110 |
|
111 | |||
111 | def configlist(self, section, name, default=None, untrusted=True): |
|
112 | def configlist(self, section, name, default=None, untrusted=True): | |
112 | return self.repo.ui.configlist(section, name, default, |
|
113 | return self.repo.ui.configlist(section, name, default, | |
113 | untrusted=untrusted) |
|
114 | untrusted=untrusted) | |
114 |
|
115 | |||
115 | def refresh(self): |
|
116 | def refresh(self): | |
116 | mtime = get_mtime(self.repo.root) |
|
117 | mtime = get_mtime(self.repo.root) | |
117 | if mtime != self.mtime: |
|
118 | if mtime != self.mtime: | |
118 | self.mtime = mtime |
|
119 | self.mtime = mtime | |
119 | self.repo = hg.repository(self.repo.ui, self.repo.root) |
|
120 | self.repo = hg.repository(self.repo.ui, self.repo.root) | |
120 | self.maxchanges = int(self.config("web", "maxchanges", 10)) |
|
121 | self.maxchanges = int(self.config("web", "maxchanges", 10)) | |
121 | self.stripecount = int(self.config("web", "stripes", 1)) |
|
122 | self.stripecount = int(self.config("web", "stripes", 1)) | |
122 | self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) |
|
123 | self.maxshortchanges = int(self.config("web", "maxshortchanges", 60)) | |
123 | self.maxfiles = int(self.config("web", "maxfiles", 10)) |
|
124 | self.maxfiles = int(self.config("web", "maxfiles", 10)) | |
124 | self.allowpull = self.configbool("web", "allowpull", True) |
|
125 | self.allowpull = self.configbool("web", "allowpull", True) | |
125 | self.encoding = self.config("web", "encoding", util._encoding) |
|
126 | self.encoding = self.config("web", "encoding", util._encoding) | |
|
127 | self._capabilities = None | |||
|
128 | ||||
|
129 | def capabilities(self): | |||
|
130 | if self._capabilities is not None: | |||
|
131 | return self._capabilities | |||
|
132 | caps = ['lookup', 'changegroupsubset'] | |||
|
133 | if self.configbool('server', 'uncompressed'): | |||
|
134 | caps.append('stream=%d' % self.repo.changelog.version) | |||
|
135 | if changegroup.bundlepriority: | |||
|
136 | caps.append('unbundle=%s' % ','.join(changegroup.bundlepriority)) | |||
|
137 | self._capabilities = caps | |||
|
138 | return caps | |||
126 |
|
139 | |||
127 | def run(self): |
|
140 | def run(self): | |
128 | if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): |
|
141 | if not os.environ.get('GATEWAY_INTERFACE', '').startswith("CGI/1."): | |
129 | raise RuntimeError("This function is only intended to be called while running as a CGI script.") |
|
142 | raise RuntimeError("This function is only intended to be called while running as a CGI script.") | |
130 | import mercurial.hgweb.wsgicgi as wsgicgi |
|
143 | import mercurial.hgweb.wsgicgi as wsgicgi | |
131 | wsgicgi.launch(self) |
|
144 | wsgicgi.launch(self) | |
132 |
|
145 | |||
133 | def __call__(self, env, respond): |
|
146 | def __call__(self, env, respond): | |
134 | req = wsgirequest(env, respond) |
|
147 | req = wsgirequest(env, respond) | |
135 | self.run_wsgi(req) |
|
148 | self.run_wsgi(req) | |
136 | return req |
|
149 | return req | |
137 |
|
150 | |||
138 | def run_wsgi(self, req): |
|
151 | def run_wsgi(self, req): | |
139 |
|
152 | |||
140 | self.refresh() |
|
153 | self.refresh() | |
141 |
|
154 | |||
142 | # expand form shortcuts |
|
155 | # expand form shortcuts | |
143 |
|
156 | |||
144 | for k in shortcuts.iterkeys(): |
|
157 | for k in shortcuts.iterkeys(): | |
145 | if k in req.form: |
|
158 | if k in req.form: | |
146 | for name, value in shortcuts[k]: |
|
159 | for name, value in shortcuts[k]: | |
147 | if value is None: |
|
160 | if value is None: | |
148 | value = req.form[k] |
|
161 | value = req.form[k] | |
149 | req.form[name] = value |
|
162 | req.form[name] = value | |
150 | del req.form[k] |
|
163 | del req.form[k] | |
151 |
|
164 | |||
152 | # work with CGI variables to create coherent structure |
|
165 | # work with CGI variables to create coherent structure | |
153 | # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME |
|
166 | # use SCRIPT_NAME, PATH_INFO and QUERY_STRING as well as our REPO_NAME | |
154 |
|
167 | |||
155 | req.url = req.env['SCRIPT_NAME'] |
|
168 | req.url = req.env['SCRIPT_NAME'] | |
156 | if not req.url.endswith('/'): |
|
169 | if not req.url.endswith('/'): | |
157 | req.url += '/' |
|
170 | req.url += '/' | |
158 | if 'REPO_NAME' in req.env: |
|
171 | if 'REPO_NAME' in req.env: | |
159 | req.url += req.env['REPO_NAME'] + '/' |
|
172 | req.url += req.env['REPO_NAME'] + '/' | |
160 |
|
173 | |||
161 | if req.env.get('PATH_INFO'): |
|
174 | if req.env.get('PATH_INFO'): | |
162 | parts = req.env.get('PATH_INFO').strip('/').split('/') |
|
175 | parts = req.env.get('PATH_INFO').strip('/').split('/') | |
163 | repo_parts = req.env.get('REPO_NAME', '').split('/') |
|
176 | repo_parts = req.env.get('REPO_NAME', '').split('/') | |
164 | if parts[:len(repo_parts)] == repo_parts: |
|
177 | if parts[:len(repo_parts)] == repo_parts: | |
165 | parts = parts[len(repo_parts):] |
|
178 | parts = parts[len(repo_parts):] | |
166 | query = '/'.join(parts) |
|
179 | query = '/'.join(parts) | |
167 | else: |
|
180 | else: | |
168 | query = req.env['QUERY_STRING'].split('&', 1)[0] |
|
181 | query = req.env['QUERY_STRING'].split('&', 1)[0] | |
169 | query = query.split(';', 1)[0] |
|
182 | query = query.split(';', 1)[0] | |
170 |
|
183 | |||
171 | # translate user-visible url structure to internal structure |
|
184 | # translate user-visible url structure to internal structure | |
172 |
|
185 | |||
173 | args = query.split('/', 2) |
|
186 | args = query.split('/', 2) | |
174 | if 'cmd' not in req.form and args and args[0]: |
|
187 | if 'cmd' not in req.form and args and args[0]: | |
175 |
|
188 | |||
176 | cmd = args.pop(0) |
|
189 | cmd = args.pop(0) | |
177 | style = cmd.rfind('-') |
|
190 | style = cmd.rfind('-') | |
178 | if style != -1: |
|
191 | if style != -1: | |
179 | req.form['style'] = [cmd[:style]] |
|
192 | req.form['style'] = [cmd[:style]] | |
180 | cmd = cmd[style+1:] |
|
193 | cmd = cmd[style+1:] | |
181 |
|
194 | |||
182 | # avoid accepting e.g. style parameter as command |
|
195 | # avoid accepting e.g. style parameter as command | |
183 | if hasattr(webcommands, cmd) or hasattr(protocol, cmd): |
|
196 | if hasattr(webcommands, cmd) or hasattr(protocol, cmd): | |
184 | req.form['cmd'] = [cmd] |
|
197 | req.form['cmd'] = [cmd] | |
185 |
|
198 | |||
186 | if args and args[0]: |
|
199 | if args and args[0]: | |
187 | node = args.pop(0) |
|
200 | node = args.pop(0) | |
188 | req.form['node'] = [node] |
|
201 | req.form['node'] = [node] | |
189 | if args: |
|
202 | if args: | |
190 | req.form['file'] = args |
|
203 | req.form['file'] = args | |
191 |
|
204 | |||
192 | if cmd == 'static': |
|
205 | if cmd == 'static': | |
193 | req.form['file'] = req.form['node'] |
|
206 | req.form['file'] = req.form['node'] | |
194 | elif cmd == 'archive': |
|
207 | elif cmd == 'archive': | |
195 | fn = req.form['node'][0] |
|
208 | fn = req.form['node'][0] | |
196 | for type_, spec in self.archive_specs.iteritems(): |
|
209 | for type_, spec in self.archive_specs.iteritems(): | |
197 | ext = spec[2] |
|
210 | ext = spec[2] | |
198 | if fn.endswith(ext): |
|
211 | if fn.endswith(ext): | |
199 | req.form['node'] = [fn[:-len(ext)]] |
|
212 | req.form['node'] = [fn[:-len(ext)]] | |
200 | req.form['type'] = [type_] |
|
213 | req.form['type'] = [type_] | |
201 |
|
214 | |||
202 | # process this if it's a protocol request |
|
215 | # process this if it's a protocol request | |
203 |
|
216 | |||
204 | cmd = req.form.get('cmd', [''])[0] |
|
217 | cmd = req.form.get('cmd', [''])[0] | |
205 | if cmd in protocol.__all__: |
|
218 | if cmd in protocol.__all__: | |
206 | method = getattr(protocol, cmd) |
|
219 | method = getattr(protocol, cmd) | |
207 | method(self, req) |
|
220 | method(self, req) | |
208 | return |
|
221 | return | |
209 |
|
222 | |||
210 | # process the web interface request |
|
223 | # process the web interface request | |
211 |
|
224 | |||
212 | try: |
|
225 | try: | |
213 |
|
226 | |||
214 | tmpl = self.templater(req) |
|
227 | tmpl = self.templater(req) | |
215 | ctype = tmpl('mimetype', encoding=self.encoding) |
|
228 | ctype = tmpl('mimetype', encoding=self.encoding) | |
216 | ctype = templater.stringify(ctype) |
|
229 | ctype = templater.stringify(ctype) | |
217 |
|
230 | |||
218 | if cmd == '': |
|
231 | if cmd == '': | |
219 | req.form['cmd'] = [tmpl.cache['default']] |
|
232 | req.form['cmd'] = [tmpl.cache['default']] | |
220 | cmd = req.form['cmd'][0] |
|
233 | cmd = req.form['cmd'][0] | |
221 |
|
234 | |||
222 | if cmd not in webcommands.__all__: |
|
235 | if cmd not in webcommands.__all__: | |
223 | msg = 'No such method: %s' % cmd |
|
236 | msg = 'No such method: %s' % cmd | |
224 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) |
|
237 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) | |
225 | elif cmd == 'file' and 'raw' in req.form.get('style', []): |
|
238 | elif cmd == 'file' and 'raw' in req.form.get('style', []): | |
226 | self.ctype = ctype |
|
239 | self.ctype = ctype | |
227 | content = webcommands.rawfile(self, req, tmpl) |
|
240 | content = webcommands.rawfile(self, req, tmpl) | |
228 | else: |
|
241 | else: | |
229 | content = getattr(webcommands, cmd)(self, req, tmpl) |
|
242 | content = getattr(webcommands, cmd)(self, req, tmpl) | |
230 | req.respond(HTTP_OK, ctype) |
|
243 | req.respond(HTTP_OK, ctype) | |
231 |
|
244 | |||
232 | req.write(content) |
|
245 | req.write(content) | |
233 | del tmpl |
|
246 | del tmpl | |
234 |
|
247 | |||
235 | except revlog.LookupError, err: |
|
248 | except revlog.LookupError, err: | |
236 | req.respond(HTTP_NOT_FOUND, ctype) |
|
249 | req.respond(HTTP_NOT_FOUND, ctype) | |
237 | req.write(tmpl('error', error='revision not found: %s' % err.name)) |
|
250 | req.write(tmpl('error', error='revision not found: %s' % err.name)) | |
238 | except (hg.RepoError, revlog.RevlogError), inst: |
|
251 | except (hg.RepoError, revlog.RevlogError), inst: | |
239 | req.respond(HTTP_SERVER_ERROR, ctype) |
|
252 | req.respond(HTTP_SERVER_ERROR, ctype) | |
240 | req.write(tmpl('error', error=str(inst))) |
|
253 | req.write(tmpl('error', error=str(inst))) | |
241 | except ErrorResponse, inst: |
|
254 | except ErrorResponse, inst: | |
242 | req.respond(inst.code, ctype) |
|
255 | req.respond(inst.code, ctype) | |
243 | req.write(tmpl('error', error=inst.message)) |
|
256 | req.write(tmpl('error', error=inst.message)) | |
244 |
|
257 | |||
245 | def templater(self, req): |
|
258 | def templater(self, req): | |
246 |
|
259 | |||
247 | # determine scheme, port and server name |
|
260 | # determine scheme, port and server name | |
248 | # this is needed to create absolute urls |
|
261 | # this is needed to create absolute urls | |
249 |
|
262 | |||
250 | proto = req.env.get('wsgi.url_scheme') |
|
263 | proto = req.env.get('wsgi.url_scheme') | |
251 | if proto == 'https': |
|
264 | if proto == 'https': | |
252 | proto = 'https' |
|
265 | proto = 'https' | |
253 | default_port = "443" |
|
266 | default_port = "443" | |
254 | else: |
|
267 | else: | |
255 | proto = 'http' |
|
268 | proto = 'http' | |
256 | default_port = "80" |
|
269 | default_port = "80" | |
257 |
|
270 | |||
258 | port = req.env["SERVER_PORT"] |
|
271 | port = req.env["SERVER_PORT"] | |
259 | port = port != default_port and (":" + port) or "" |
|
272 | port = port != default_port and (":" + port) or "" | |
260 | urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) |
|
273 | urlbase = '%s://%s%s' % (proto, req.env['SERVER_NAME'], port) | |
261 | staticurl = self.config("web", "staticurl") or req.url + 'static/' |
|
274 | staticurl = self.config("web", "staticurl") or req.url + 'static/' | |
262 | if not staticurl.endswith('/'): |
|
275 | if not staticurl.endswith('/'): | |
263 | staticurl += '/' |
|
276 | staticurl += '/' | |
264 |
|
277 | |||
265 | # some functions for the templater |
|
278 | # some functions for the templater | |
266 |
|
279 | |||
267 | def header(**map): |
|
280 | def header(**map): | |
268 | yield tmpl('header', encoding=self.encoding, **map) |
|
281 | yield tmpl('header', encoding=self.encoding, **map) | |
269 |
|
282 | |||
270 | def footer(**map): |
|
283 | def footer(**map): | |
271 | yield tmpl("footer", **map) |
|
284 | yield tmpl("footer", **map) | |
272 |
|
285 | |||
273 | def motd(**map): |
|
286 | def motd(**map): | |
274 | yield self.config("web", "motd", "") |
|
287 | yield self.config("web", "motd", "") | |
275 |
|
288 | |||
276 | def sessionvars(**map): |
|
289 | def sessionvars(**map): | |
277 | fields = [] |
|
290 | fields = [] | |
278 | if 'style' in req.form: |
|
291 | if 'style' in req.form: | |
279 | style = req.form['style'][0] |
|
292 | style = req.form['style'][0] | |
280 | if style != self.config('web', 'style', ''): |
|
293 | if style != self.config('web', 'style', ''): | |
281 | fields.append(('style', style)) |
|
294 | fields.append(('style', style)) | |
282 |
|
295 | |||
283 | separator = req.url[-1] == '?' and ';' or '?' |
|
296 | separator = req.url[-1] == '?' and ';' or '?' | |
284 | for name, value in fields: |
|
297 | for name, value in fields: | |
285 | yield dict(name=name, value=value, separator=separator) |
|
298 | yield dict(name=name, value=value, separator=separator) | |
286 | separator = ';' |
|
299 | separator = ';' | |
287 |
|
300 | |||
288 | # figure out which style to use |
|
301 | # figure out which style to use | |
289 |
|
302 | |||
290 | style = self.config("web", "style", "") |
|
303 | style = self.config("web", "style", "") | |
291 | if 'style' in req.form: |
|
304 | if 'style' in req.form: | |
292 | style = req.form['style'][0] |
|
305 | style = req.form['style'][0] | |
293 | mapfile = style_map(self.templatepath, style) |
|
306 | mapfile = style_map(self.templatepath, style) | |
294 |
|
307 | |||
295 | if not self.reponame: |
|
308 | if not self.reponame: | |
296 | self.reponame = (self.config("web", "name") |
|
309 | self.reponame = (self.config("web", "name") | |
297 | or req.env.get('REPO_NAME') |
|
310 | or req.env.get('REPO_NAME') | |
298 | or req.url.strip('/') or self.repo.root) |
|
311 | or req.url.strip('/') or self.repo.root) | |
299 |
|
312 | |||
300 | # create the templater |
|
313 | # create the templater | |
301 |
|
314 | |||
302 | tmpl = templater.templater(mapfile, templatefilters.filters, |
|
315 | tmpl = templater.templater(mapfile, templatefilters.filters, | |
303 | defaults={"url": req.url, |
|
316 | defaults={"url": req.url, | |
304 | "staticurl": staticurl, |
|
317 | "staticurl": staticurl, | |
305 | "urlbase": urlbase, |
|
318 | "urlbase": urlbase, | |
306 | "repo": self.reponame, |
|
319 | "repo": self.reponame, | |
307 | "header": header, |
|
320 | "header": header, | |
308 | "footer": footer, |
|
321 | "footer": footer, | |
309 | "motd": motd, |
|
322 | "motd": motd, | |
310 | "sessionvars": sessionvars |
|
323 | "sessionvars": sessionvars | |
311 | }) |
|
324 | }) | |
312 | return tmpl |
|
325 | return tmpl | |
313 |
|
326 | |||
314 | def archivelist(self, nodeid): |
|
327 | def archivelist(self, nodeid): | |
315 | allowed = self.configlist("web", "allow_archive") |
|
328 | allowed = self.configlist("web", "allow_archive") | |
316 | for i, spec in self.archive_specs.iteritems(): |
|
329 | for i, spec in self.archive_specs.iteritems(): | |
317 | if i in allowed or self.configbool("web", "allow" + i): |
|
330 | if i in allowed or self.configbool("web", "allow" + i): | |
318 | yield {"type" : i, "extension" : spec[2], "node" : nodeid} |
|
331 | yield {"type" : i, "extension" : spec[2], "node" : nodeid} | |
319 |
|
332 | |||
320 | def listfilediffs(self, tmpl, files, changeset): |
|
333 | def listfilediffs(self, tmpl, files, changeset): | |
321 | for f in files[:self.maxfiles]: |
|
334 | for f in files[:self.maxfiles]: | |
322 | yield tmpl("filedifflink", node=hex(changeset), file=f) |
|
335 | yield tmpl("filedifflink", node=hex(changeset), file=f) | |
323 | if len(files) > self.maxfiles: |
|
336 | if len(files) > self.maxfiles: | |
324 | yield tmpl("fileellipses") |
|
337 | yield tmpl("fileellipses") | |
325 |
|
338 | |||
326 | def siblings(self, siblings=[], hiderev=None, **args): |
|
339 | def siblings(self, siblings=[], hiderev=None, **args): | |
327 | siblings = [s for s in siblings if s.node() != nullid] |
|
340 | siblings = [s for s in siblings if s.node() != nullid] | |
328 | if len(siblings) == 1 and siblings[0].rev() == hiderev: |
|
341 | if len(siblings) == 1 and siblings[0].rev() == hiderev: | |
329 | return |
|
342 | return | |
330 | for s in siblings: |
|
343 | for s in siblings: | |
331 | d = {'node': hex(s.node()), 'rev': s.rev()} |
|
344 | d = {'node': hex(s.node()), 'rev': s.rev()} | |
332 | if hasattr(s, 'path'): |
|
345 | if hasattr(s, 'path'): | |
333 | d['file'] = s.path() |
|
346 | d['file'] = s.path() | |
334 | d.update(args) |
|
347 | d.update(args) | |
335 | yield d |
|
348 | yield d | |
336 |
|
349 | |||
337 | def renamelink(self, fl, node): |
|
350 | def renamelink(self, fl, node): | |
338 | r = fl.renamed(node) |
|
351 | r = fl.renamed(node) | |
339 | if r: |
|
352 | if r: | |
340 | return [dict(file=r[0], node=hex(r[1]))] |
|
353 | return [dict(file=r[0], node=hex(r[1]))] | |
341 | return [] |
|
354 | return [] | |
342 |
|
355 | |||
343 | def nodetagsdict(self, node): |
|
356 | def nodetagsdict(self, node): | |
344 | return [{"name": i} for i in self.repo.nodetags(node)] |
|
357 | return [{"name": i} for i in self.repo.nodetags(node)] | |
345 |
|
358 | |||
346 | def nodebranchdict(self, ctx): |
|
359 | def nodebranchdict(self, ctx): | |
347 | branches = [] |
|
360 | branches = [] | |
348 | branch = ctx.branch() |
|
361 | branch = ctx.branch() | |
349 | # If this is an empty repo, ctx.node() == nullid, |
|
362 | # If this is an empty repo, ctx.node() == nullid, | |
350 | # ctx.branch() == 'default', but branchtags() is |
|
363 | # ctx.branch() == 'default', but branchtags() is | |
351 | # an empty dict. Using dict.get avoids a traceback. |
|
364 | # an empty dict. Using dict.get avoids a traceback. | |
352 | if self.repo.branchtags().get(branch) == ctx.node(): |
|
365 | if self.repo.branchtags().get(branch) == ctx.node(): | |
353 | branches.append({"name": branch}) |
|
366 | branches.append({"name": branch}) | |
354 | return branches |
|
367 | return branches | |
355 |
|
368 | |||
356 | def showtag(self, tmpl, t1, node=nullid, **args): |
|
369 | def showtag(self, tmpl, t1, node=nullid, **args): | |
357 | for t in self.repo.nodetags(node): |
|
370 | for t in self.repo.nodetags(node): | |
358 | yield tmpl(t1, tag=t, **args) |
|
371 | yield tmpl(t1, tag=t, **args) | |
359 |
|
372 | |||
360 | def diff(self, tmpl, node1, node2, files): |
|
373 | def diff(self, tmpl, node1, node2, files): | |
361 | def filterfiles(filters, files): |
|
374 | def filterfiles(filters, files): | |
362 | l = [x for x in files if x in filters] |
|
375 | l = [x for x in files if x in filters] | |
363 |
|
376 | |||
364 | for t in filters: |
|
377 | for t in filters: | |
365 | if t and t[-1] != os.sep: |
|
378 | if t and t[-1] != os.sep: | |
366 | t += os.sep |
|
379 | t += os.sep | |
367 | l += [x for x in files if x.startswith(t)] |
|
380 | l += [x for x in files if x.startswith(t)] | |
368 | return l |
|
381 | return l | |
369 |
|
382 | |||
370 | parity = paritygen(self.stripecount) |
|
383 | parity = paritygen(self.stripecount) | |
371 | def diffblock(diff, f, fn): |
|
384 | def diffblock(diff, f, fn): | |
372 | yield tmpl("diffblock", |
|
385 | yield tmpl("diffblock", | |
373 | lines=prettyprintlines(diff), |
|
386 | lines=prettyprintlines(diff), | |
374 | parity=parity.next(), |
|
387 | parity=parity.next(), | |
375 | file=f, |
|
388 | file=f, | |
376 | filenode=hex(fn or nullid)) |
|
389 | filenode=hex(fn or nullid)) | |
377 |
|
390 | |||
378 | blockcount = countgen() |
|
391 | blockcount = countgen() | |
379 | def prettyprintlines(diff): |
|
392 | def prettyprintlines(diff): | |
380 | blockno = blockcount.next() |
|
393 | blockno = blockcount.next() | |
381 | for lineno, l in enumerate(diff.splitlines(1)): |
|
394 | for lineno, l in enumerate(diff.splitlines(1)): | |
382 | if blockno == 0: |
|
395 | if blockno == 0: | |
383 | lineno = lineno + 1 |
|
396 | lineno = lineno + 1 | |
384 | else: |
|
397 | else: | |
385 | lineno = "%d.%d" % (blockno, lineno + 1) |
|
398 | lineno = "%d.%d" % (blockno, lineno + 1) | |
386 | if l.startswith('+'): |
|
399 | if l.startswith('+'): | |
387 | ltype = "difflineplus" |
|
400 | ltype = "difflineplus" | |
388 | elif l.startswith('-'): |
|
401 | elif l.startswith('-'): | |
389 | ltype = "difflineminus" |
|
402 | ltype = "difflineminus" | |
390 | elif l.startswith('@'): |
|
403 | elif l.startswith('@'): | |
391 | ltype = "difflineat" |
|
404 | ltype = "difflineat" | |
392 | else: |
|
405 | else: | |
393 | ltype = "diffline" |
|
406 | ltype = "diffline" | |
394 | yield tmpl(ltype, |
|
407 | yield tmpl(ltype, | |
395 | line=l, |
|
408 | line=l, | |
396 | lineid="l%s" % lineno, |
|
409 | lineid="l%s" % lineno, | |
397 | linenumber="% 8s" % lineno) |
|
410 | linenumber="% 8s" % lineno) | |
398 |
|
411 | |||
399 | r = self.repo |
|
412 | r = self.repo | |
400 | c1 = r.changectx(node1) |
|
413 | c1 = r.changectx(node1) | |
401 | c2 = r.changectx(node2) |
|
414 | c2 = r.changectx(node2) | |
402 | date1 = util.datestr(c1.date()) |
|
415 | date1 = util.datestr(c1.date()) | |
403 | date2 = util.datestr(c2.date()) |
|
416 | date2 = util.datestr(c2.date()) | |
404 |
|
417 | |||
405 | modified, added, removed, deleted, unknown = r.status(node1, node2)[:5] |
|
418 | modified, added, removed, deleted, unknown = r.status(node1, node2)[:5] | |
406 | if files: |
|
419 | if files: | |
407 | modified, added, removed = map(lambda x: filterfiles(files, x), |
|
420 | modified, added, removed = map(lambda x: filterfiles(files, x), | |
408 | (modified, added, removed)) |
|
421 | (modified, added, removed)) | |
409 |
|
422 | |||
410 | diffopts = patch.diffopts(self.repo.ui, untrusted=True) |
|
423 | diffopts = patch.diffopts(self.repo.ui, untrusted=True) | |
411 | for f in modified: |
|
424 | for f in modified: | |
412 | to = c1.filectx(f).data() |
|
425 | to = c1.filectx(f).data() | |
413 | tn = c2.filectx(f).data() |
|
426 | tn = c2.filectx(f).data() | |
414 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
427 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, | |
415 | opts=diffopts), f, tn) |
|
428 | opts=diffopts), f, tn) | |
416 | for f in added: |
|
429 | for f in added: | |
417 | to = None |
|
430 | to = None | |
418 | tn = c2.filectx(f).data() |
|
431 | tn = c2.filectx(f).data() | |
419 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
432 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, | |
420 | opts=diffopts), f, tn) |
|
433 | opts=diffopts), f, tn) | |
421 | for f in removed: |
|
434 | for f in removed: | |
422 | to = c1.filectx(f).data() |
|
435 | to = c1.filectx(f).data() | |
423 | tn = None |
|
436 | tn = None | |
424 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, |
|
437 | yield diffblock(mdiff.unidiff(to, date1, tn, date2, f, f, | |
425 | opts=diffopts), f, tn) |
|
438 | opts=diffopts), f, tn) | |
426 |
|
439 | |||
427 | def changelog(self, tmpl, ctx, shortlog=False): |
|
440 | def changelog(self, tmpl, ctx, shortlog=False): | |
428 | def changelist(limit=0,**map): |
|
441 | def changelist(limit=0,**map): | |
429 | cl = self.repo.changelog |
|
442 | cl = self.repo.changelog | |
430 | l = [] # build a list in forward order for efficiency |
|
443 | l = [] # build a list in forward order for efficiency | |
431 | for i in xrange(start, end): |
|
444 | for i in xrange(start, end): | |
432 | ctx = self.repo.changectx(i) |
|
445 | ctx = self.repo.changectx(i) | |
433 | n = ctx.node() |
|
446 | n = ctx.node() | |
434 |
|
447 | |||
435 | l.insert(0, {"parity": parity.next(), |
|
448 | l.insert(0, {"parity": parity.next(), | |
436 | "author": ctx.user(), |
|
449 | "author": ctx.user(), | |
437 | "parent": self.siblings(ctx.parents(), i - 1), |
|
450 | "parent": self.siblings(ctx.parents(), i - 1), | |
438 | "child": self.siblings(ctx.children(), i + 1), |
|
451 | "child": self.siblings(ctx.children(), i + 1), | |
439 | "changelogtag": self.showtag("changelogtag",n), |
|
452 | "changelogtag": self.showtag("changelogtag",n), | |
440 | "desc": ctx.description(), |
|
453 | "desc": ctx.description(), | |
441 | "date": ctx.date(), |
|
454 | "date": ctx.date(), | |
442 | "files": self.listfilediffs(tmpl, ctx.files(), n), |
|
455 | "files": self.listfilediffs(tmpl, ctx.files(), n), | |
443 | "rev": i, |
|
456 | "rev": i, | |
444 | "node": hex(n), |
|
457 | "node": hex(n), | |
445 | "tags": self.nodetagsdict(n), |
|
458 | "tags": self.nodetagsdict(n), | |
446 | "branches": self.nodebranchdict(ctx)}) |
|
459 | "branches": self.nodebranchdict(ctx)}) | |
447 |
|
460 | |||
448 | if limit > 0: |
|
461 | if limit > 0: | |
449 | l = l[:limit] |
|
462 | l = l[:limit] | |
450 |
|
463 | |||
451 | for e in l: |
|
464 | for e in l: | |
452 | yield e |
|
465 | yield e | |
453 |
|
466 | |||
454 | maxchanges = shortlog and self.maxshortchanges or self.maxchanges |
|
467 | maxchanges = shortlog and self.maxshortchanges or self.maxchanges | |
455 | cl = self.repo.changelog |
|
468 | cl = self.repo.changelog | |
456 | count = cl.count() |
|
469 | count = cl.count() | |
457 | pos = ctx.rev() |
|
470 | pos = ctx.rev() | |
458 | start = max(0, pos - maxchanges + 1) |
|
471 | start = max(0, pos - maxchanges + 1) | |
459 | end = min(count, start + maxchanges) |
|
472 | end = min(count, start + maxchanges) | |
460 | pos = end - 1 |
|
473 | pos = end - 1 | |
461 | parity = paritygen(self.stripecount, offset=start-end) |
|
474 | parity = paritygen(self.stripecount, offset=start-end) | |
462 |
|
475 | |||
463 | changenav = revnavgen(pos, maxchanges, count, self.repo.changectx) |
|
476 | changenav = revnavgen(pos, maxchanges, count, self.repo.changectx) | |
464 |
|
477 | |||
465 | return tmpl(shortlog and 'shortlog' or 'changelog', |
|
478 | return tmpl(shortlog and 'shortlog' or 'changelog', | |
466 | changenav=changenav, |
|
479 | changenav=changenav, | |
467 | node=hex(cl.tip()), |
|
480 | node=hex(cl.tip()), | |
468 | rev=pos, changesets=count, |
|
481 | rev=pos, changesets=count, | |
469 | entries=lambda **x: changelist(limit=0,**x), |
|
482 | entries=lambda **x: changelist(limit=0,**x), | |
470 | latestentry=lambda **x: changelist(limit=1,**x), |
|
483 | latestentry=lambda **x: changelist(limit=1,**x), | |
471 | archives=self.archivelist("tip")) |
|
484 | archives=self.archivelist("tip")) | |
472 |
|
485 | |||
473 | def search(self, tmpl, query): |
|
486 | def search(self, tmpl, query): | |
474 |
|
487 | |||
475 | def changelist(**map): |
|
488 | def changelist(**map): | |
476 | cl = self.repo.changelog |
|
489 | cl = self.repo.changelog | |
477 | count = 0 |
|
490 | count = 0 | |
478 | qw = query.lower().split() |
|
491 | qw = query.lower().split() | |
479 |
|
492 | |||
480 | def revgen(): |
|
493 | def revgen(): | |
481 | for i in xrange(cl.count() - 1, 0, -100): |
|
494 | for i in xrange(cl.count() - 1, 0, -100): | |
482 | l = [] |
|
495 | l = [] | |
483 | for j in xrange(max(0, i - 100), i + 1): |
|
496 | for j in xrange(max(0, i - 100), i + 1): | |
484 | ctx = self.repo.changectx(j) |
|
497 | ctx = self.repo.changectx(j) | |
485 | l.append(ctx) |
|
498 | l.append(ctx) | |
486 | l.reverse() |
|
499 | l.reverse() | |
487 | for e in l: |
|
500 | for e in l: | |
488 | yield e |
|
501 | yield e | |
489 |
|
502 | |||
490 | for ctx in revgen(): |
|
503 | for ctx in revgen(): | |
491 | miss = 0 |
|
504 | miss = 0 | |
492 | for q in qw: |
|
505 | for q in qw: | |
493 | if not (q in ctx.user().lower() or |
|
506 | if not (q in ctx.user().lower() or | |
494 | q in ctx.description().lower() or |
|
507 | q in ctx.description().lower() or | |
495 | q in " ".join(ctx.files()).lower()): |
|
508 | q in " ".join(ctx.files()).lower()): | |
496 | miss = 1 |
|
509 | miss = 1 | |
497 | break |
|
510 | break | |
498 | if miss: |
|
511 | if miss: | |
499 | continue |
|
512 | continue | |
500 |
|
513 | |||
501 | count += 1 |
|
514 | count += 1 | |
502 | n = ctx.node() |
|
515 | n = ctx.node() | |
503 |
|
516 | |||
504 | yield tmpl('searchentry', |
|
517 | yield tmpl('searchentry', | |
505 | parity=parity.next(), |
|
518 | parity=parity.next(), | |
506 | author=ctx.user(), |
|
519 | author=ctx.user(), | |
507 | parent=self.siblings(ctx.parents()), |
|
520 | parent=self.siblings(ctx.parents()), | |
508 | child=self.siblings(ctx.children()), |
|
521 | child=self.siblings(ctx.children()), | |
509 | changelogtag=self.showtag("changelogtag",n), |
|
522 | changelogtag=self.showtag("changelogtag",n), | |
510 | desc=ctx.description(), |
|
523 | desc=ctx.description(), | |
511 | date=ctx.date(), |
|
524 | date=ctx.date(), | |
512 | files=self.listfilediffs(tmpl, ctx.files(), n), |
|
525 | files=self.listfilediffs(tmpl, ctx.files(), n), | |
513 | rev=ctx.rev(), |
|
526 | rev=ctx.rev(), | |
514 | node=hex(n), |
|
527 | node=hex(n), | |
515 | tags=self.nodetagsdict(n), |
|
528 | tags=self.nodetagsdict(n), | |
516 | branches=self.nodebranchdict(ctx)) |
|
529 | branches=self.nodebranchdict(ctx)) | |
517 |
|
530 | |||
518 | if count >= self.maxchanges: |
|
531 | if count >= self.maxchanges: | |
519 | break |
|
532 | break | |
520 |
|
533 | |||
521 | cl = self.repo.changelog |
|
534 | cl = self.repo.changelog | |
522 | parity = paritygen(self.stripecount) |
|
535 | parity = paritygen(self.stripecount) | |
523 |
|
536 | |||
524 | return tmpl('search', |
|
537 | return tmpl('search', | |
525 | query=query, |
|
538 | query=query, | |
526 | node=hex(cl.tip()), |
|
539 | node=hex(cl.tip()), | |
527 | entries=changelist, |
|
540 | entries=changelist, | |
528 | archives=self.archivelist("tip")) |
|
541 | archives=self.archivelist("tip")) | |
529 |
|
542 | |||
530 | def changeset(self, tmpl, ctx): |
|
543 | def changeset(self, tmpl, ctx): | |
531 | n = ctx.node() |
|
544 | n = ctx.node() | |
532 | parents = ctx.parents() |
|
545 | parents = ctx.parents() | |
533 | p1 = parents[0].node() |
|
546 | p1 = parents[0].node() | |
534 |
|
547 | |||
535 | files = [] |
|
548 | files = [] | |
536 | parity = paritygen(self.stripecount) |
|
549 | parity = paritygen(self.stripecount) | |
537 | for f in ctx.files(): |
|
550 | for f in ctx.files(): | |
538 | files.append(tmpl("filenodelink", |
|
551 | files.append(tmpl("filenodelink", | |
539 | node=hex(n), file=f, |
|
552 | node=hex(n), file=f, | |
540 | parity=parity.next())) |
|
553 | parity=parity.next())) | |
541 |
|
554 | |||
542 | def diff(**map): |
|
555 | def diff(**map): | |
543 | yield self.diff(tmpl, p1, n, None) |
|
556 | yield self.diff(tmpl, p1, n, None) | |
544 |
|
557 | |||
545 | return tmpl('changeset', |
|
558 | return tmpl('changeset', | |
546 | diff=diff, |
|
559 | diff=diff, | |
547 | rev=ctx.rev(), |
|
560 | rev=ctx.rev(), | |
548 | node=hex(n), |
|
561 | node=hex(n), | |
549 | parent=self.siblings(parents), |
|
562 | parent=self.siblings(parents), | |
550 | child=self.siblings(ctx.children()), |
|
563 | child=self.siblings(ctx.children()), | |
551 | changesettag=self.showtag("changesettag",n), |
|
564 | changesettag=self.showtag("changesettag",n), | |
552 | author=ctx.user(), |
|
565 | author=ctx.user(), | |
553 | desc=ctx.description(), |
|
566 | desc=ctx.description(), | |
554 | date=ctx.date(), |
|
567 | date=ctx.date(), | |
555 | files=files, |
|
568 | files=files, | |
556 | archives=self.archivelist(hex(n)), |
|
569 | archives=self.archivelist(hex(n)), | |
557 | tags=self.nodetagsdict(n), |
|
570 | tags=self.nodetagsdict(n), | |
558 | branches=self.nodebranchdict(ctx)) |
|
571 | branches=self.nodebranchdict(ctx)) | |
559 |
|
572 | |||
560 | def filelog(self, tmpl, fctx): |
|
573 | def filelog(self, tmpl, fctx): | |
561 | f = fctx.path() |
|
574 | f = fctx.path() | |
562 | fl = fctx.filelog() |
|
575 | fl = fctx.filelog() | |
563 | count = fl.count() |
|
576 | count = fl.count() | |
564 | pagelen = self.maxshortchanges |
|
577 | pagelen = self.maxshortchanges | |
565 | pos = fctx.filerev() |
|
578 | pos = fctx.filerev() | |
566 | start = max(0, pos - pagelen + 1) |
|
579 | start = max(0, pos - pagelen + 1) | |
567 | end = min(count, start + pagelen) |
|
580 | end = min(count, start + pagelen) | |
568 | pos = end - 1 |
|
581 | pos = end - 1 | |
569 | parity = paritygen(self.stripecount, offset=start-end) |
|
582 | parity = paritygen(self.stripecount, offset=start-end) | |
570 |
|
583 | |||
571 | def entries(limit=0, **map): |
|
584 | def entries(limit=0, **map): | |
572 | l = [] |
|
585 | l = [] | |
573 |
|
586 | |||
574 | for i in xrange(start, end): |
|
587 | for i in xrange(start, end): | |
575 | ctx = fctx.filectx(i) |
|
588 | ctx = fctx.filectx(i) | |
576 | n = fl.node(i) |
|
589 | n = fl.node(i) | |
577 |
|
590 | |||
578 | l.insert(0, {"parity": parity.next(), |
|
591 | l.insert(0, {"parity": parity.next(), | |
579 | "filerev": i, |
|
592 | "filerev": i, | |
580 | "file": f, |
|
593 | "file": f, | |
581 | "node": hex(ctx.node()), |
|
594 | "node": hex(ctx.node()), | |
582 | "author": ctx.user(), |
|
595 | "author": ctx.user(), | |
583 | "date": ctx.date(), |
|
596 | "date": ctx.date(), | |
584 | "rename": self.renamelink(fl, n), |
|
597 | "rename": self.renamelink(fl, n), | |
585 | "parent": self.siblings(fctx.parents()), |
|
598 | "parent": self.siblings(fctx.parents()), | |
586 | "child": self.siblings(fctx.children()), |
|
599 | "child": self.siblings(fctx.children()), | |
587 | "desc": ctx.description()}) |
|
600 | "desc": ctx.description()}) | |
588 |
|
601 | |||
589 | if limit > 0: |
|
602 | if limit > 0: | |
590 | l = l[:limit] |
|
603 | l = l[:limit] | |
591 |
|
604 | |||
592 | for e in l: |
|
605 | for e in l: | |
593 | yield e |
|
606 | yield e | |
594 |
|
607 | |||
595 | nodefunc = lambda x: fctx.filectx(fileid=x) |
|
608 | nodefunc = lambda x: fctx.filectx(fileid=x) | |
596 | nav = revnavgen(pos, pagelen, count, nodefunc) |
|
609 | nav = revnavgen(pos, pagelen, count, nodefunc) | |
597 | return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav, |
|
610 | return tmpl("filelog", file=f, node=hex(fctx.node()), nav=nav, | |
598 | entries=lambda **x: entries(limit=0, **x), |
|
611 | entries=lambda **x: entries(limit=0, **x), | |
599 | latestentry=lambda **x: entries(limit=1, **x)) |
|
612 | latestentry=lambda **x: entries(limit=1, **x)) | |
600 |
|
613 | |||
601 | def filerevision(self, tmpl, fctx): |
|
614 | def filerevision(self, tmpl, fctx): | |
602 | f = fctx.path() |
|
615 | f = fctx.path() | |
603 | text = fctx.data() |
|
616 | text = fctx.data() | |
604 | fl = fctx.filelog() |
|
617 | fl = fctx.filelog() | |
605 | n = fctx.filenode() |
|
618 | n = fctx.filenode() | |
606 | parity = paritygen(self.stripecount) |
|
619 | parity = paritygen(self.stripecount) | |
607 |
|
620 | |||
608 | if util.binary(text): |
|
621 | if util.binary(text): | |
609 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' |
|
622 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' | |
610 | text = '(binary:%s)' % mt |
|
623 | text = '(binary:%s)' % mt | |
611 |
|
624 | |||
612 | def lines(): |
|
625 | def lines(): | |
613 | for lineno, t in enumerate(text.splitlines(1)): |
|
626 | for lineno, t in enumerate(text.splitlines(1)): | |
614 | yield {"line": t, |
|
627 | yield {"line": t, | |
615 | "lineid": "l%d" % (lineno + 1), |
|
628 | "lineid": "l%d" % (lineno + 1), | |
616 | "linenumber": "% 6d" % (lineno + 1), |
|
629 | "linenumber": "% 6d" % (lineno + 1), | |
617 | "parity": parity.next()} |
|
630 | "parity": parity.next()} | |
618 |
|
631 | |||
619 | return tmpl("filerevision", |
|
632 | return tmpl("filerevision", | |
620 | file=f, |
|
633 | file=f, | |
621 | path=_up(f), |
|
634 | path=_up(f), | |
622 | text=lines(), |
|
635 | text=lines(), | |
623 | rev=fctx.rev(), |
|
636 | rev=fctx.rev(), | |
624 | node=hex(fctx.node()), |
|
637 | node=hex(fctx.node()), | |
625 | author=fctx.user(), |
|
638 | author=fctx.user(), | |
626 | date=fctx.date(), |
|
639 | date=fctx.date(), | |
627 | desc=fctx.description(), |
|
640 | desc=fctx.description(), | |
628 | parent=self.siblings(fctx.parents()), |
|
641 | parent=self.siblings(fctx.parents()), | |
629 | child=self.siblings(fctx.children()), |
|
642 | child=self.siblings(fctx.children()), | |
630 | rename=self.renamelink(fl, n), |
|
643 | rename=self.renamelink(fl, n), | |
631 | permissions=fctx.manifest().flags(f)) |
|
644 | permissions=fctx.manifest().flags(f)) | |
632 |
|
645 | |||
633 | def fileannotate(self, tmpl, fctx): |
|
646 | def fileannotate(self, tmpl, fctx): | |
634 | f = fctx.path() |
|
647 | f = fctx.path() | |
635 | n = fctx.filenode() |
|
648 | n = fctx.filenode() | |
636 | fl = fctx.filelog() |
|
649 | fl = fctx.filelog() | |
637 | parity = paritygen(self.stripecount) |
|
650 | parity = paritygen(self.stripecount) | |
638 |
|
651 | |||
639 | def annotate(**map): |
|
652 | def annotate(**map): | |
640 | last = None |
|
653 | last = None | |
641 | lines = enumerate(fctx.annotate(follow=True, linenumber=True)) |
|
654 | lines = enumerate(fctx.annotate(follow=True, linenumber=True)) | |
642 | for lineno, ((f, targetline), l) in lines: |
|
655 | for lineno, ((f, targetline), l) in lines: | |
643 | fnode = f.filenode() |
|
656 | fnode = f.filenode() | |
644 | name = self.repo.ui.shortuser(f.user()) |
|
657 | name = self.repo.ui.shortuser(f.user()) | |
645 |
|
658 | |||
646 | if last != fnode: |
|
659 | if last != fnode: | |
647 | last = fnode |
|
660 | last = fnode | |
648 |
|
661 | |||
649 | yield {"parity": parity.next(), |
|
662 | yield {"parity": parity.next(), | |
650 | "node": hex(f.node()), |
|
663 | "node": hex(f.node()), | |
651 | "rev": f.rev(), |
|
664 | "rev": f.rev(), | |
652 | "author": name, |
|
665 | "author": name, | |
653 | "file": f.path(), |
|
666 | "file": f.path(), | |
654 | "targetline": targetline, |
|
667 | "targetline": targetline, | |
655 | "line": l, |
|
668 | "line": l, | |
656 | "lineid": "l%d" % (lineno + 1), |
|
669 | "lineid": "l%d" % (lineno + 1), | |
657 | "linenumber": "% 6d" % (lineno + 1)} |
|
670 | "linenumber": "% 6d" % (lineno + 1)} | |
658 |
|
671 | |||
659 | return tmpl("fileannotate", |
|
672 | return tmpl("fileannotate", | |
660 | file=f, |
|
673 | file=f, | |
661 | annotate=annotate, |
|
674 | annotate=annotate, | |
662 | path=_up(f), |
|
675 | path=_up(f), | |
663 | rev=fctx.rev(), |
|
676 | rev=fctx.rev(), | |
664 | node=hex(fctx.node()), |
|
677 | node=hex(fctx.node()), | |
665 | author=fctx.user(), |
|
678 | author=fctx.user(), | |
666 | date=fctx.date(), |
|
679 | date=fctx.date(), | |
667 | desc=fctx.description(), |
|
680 | desc=fctx.description(), | |
668 | rename=self.renamelink(fl, n), |
|
681 | rename=self.renamelink(fl, n), | |
669 | parent=self.siblings(fctx.parents()), |
|
682 | parent=self.siblings(fctx.parents()), | |
670 | child=self.siblings(fctx.children()), |
|
683 | child=self.siblings(fctx.children()), | |
671 | permissions=fctx.manifest().flags(f)) |
|
684 | permissions=fctx.manifest().flags(f)) | |
672 |
|
685 | |||
673 | def manifest(self, tmpl, ctx, path): |
|
686 | def manifest(self, tmpl, ctx, path): | |
674 | mf = ctx.manifest() |
|
687 | mf = ctx.manifest() | |
675 | node = ctx.node() |
|
688 | node = ctx.node() | |
676 |
|
689 | |||
677 | files = {} |
|
690 | files = {} | |
678 | parity = paritygen(self.stripecount) |
|
691 | parity = paritygen(self.stripecount) | |
679 |
|
692 | |||
680 | if path and path[-1] != "/": |
|
693 | if path and path[-1] != "/": | |
681 | path += "/" |
|
694 | path += "/" | |
682 | l = len(path) |
|
695 | l = len(path) | |
683 | abspath = "/" + path |
|
696 | abspath = "/" + path | |
684 |
|
697 | |||
685 | for f, n in mf.items(): |
|
698 | for f, n in mf.items(): | |
686 | if f[:l] != path: |
|
699 | if f[:l] != path: | |
687 | continue |
|
700 | continue | |
688 | remain = f[l:] |
|
701 | remain = f[l:] | |
689 | if "/" in remain: |
|
702 | if "/" in remain: | |
690 | short = remain[:remain.index("/") + 1] # bleah |
|
703 | short = remain[:remain.index("/") + 1] # bleah | |
691 | files[short] = (f, None) |
|
704 | files[short] = (f, None) | |
692 | else: |
|
705 | else: | |
693 | short = os.path.basename(remain) |
|
706 | short = os.path.basename(remain) | |
694 | files[short] = (f, n) |
|
707 | files[short] = (f, n) | |
695 |
|
708 | |||
696 | if not files: |
|
709 | if not files: | |
697 | raise ErrorResponse(HTTP_NOT_FOUND, 'Path not found: ' + path) |
|
710 | raise ErrorResponse(HTTP_NOT_FOUND, 'Path not found: ' + path) | |
698 |
|
711 | |||
699 | def filelist(**map): |
|
712 | def filelist(**map): | |
700 | fl = files.keys() |
|
713 | fl = files.keys() | |
701 | fl.sort() |
|
714 | fl.sort() | |
702 | for f in fl: |
|
715 | for f in fl: | |
703 | full, fnode = files[f] |
|
716 | full, fnode = files[f] | |
704 | if not fnode: |
|
717 | if not fnode: | |
705 | continue |
|
718 | continue | |
706 |
|
719 | |||
707 | fctx = ctx.filectx(full) |
|
720 | fctx = ctx.filectx(full) | |
708 | yield {"file": full, |
|
721 | yield {"file": full, | |
709 | "parity": parity.next(), |
|
722 | "parity": parity.next(), | |
710 | "basename": f, |
|
723 | "basename": f, | |
711 | "date": fctx.changectx().date(), |
|
724 | "date": fctx.changectx().date(), | |
712 | "size": fctx.size(), |
|
725 | "size": fctx.size(), | |
713 | "permissions": mf.flags(full)} |
|
726 | "permissions": mf.flags(full)} | |
714 |
|
727 | |||
715 | def dirlist(**map): |
|
728 | def dirlist(**map): | |
716 | fl = files.keys() |
|
729 | fl = files.keys() | |
717 | fl.sort() |
|
730 | fl.sort() | |
718 | for f in fl: |
|
731 | for f in fl: | |
719 | full, fnode = files[f] |
|
732 | full, fnode = files[f] | |
720 | if fnode: |
|
733 | if fnode: | |
721 | continue |
|
734 | continue | |
722 |
|
735 | |||
723 | yield {"parity": parity.next(), |
|
736 | yield {"parity": parity.next(), | |
724 | "path": "%s%s" % (abspath, f), |
|
737 | "path": "%s%s" % (abspath, f), | |
725 | "basename": f[:-1]} |
|
738 | "basename": f[:-1]} | |
726 |
|
739 | |||
727 | return tmpl("manifest", |
|
740 | return tmpl("manifest", | |
728 | rev=ctx.rev(), |
|
741 | rev=ctx.rev(), | |
729 | node=hex(node), |
|
742 | node=hex(node), | |
730 | path=abspath, |
|
743 | path=abspath, | |
731 | up=_up(abspath), |
|
744 | up=_up(abspath), | |
732 | upparity=parity.next(), |
|
745 | upparity=parity.next(), | |
733 | fentries=filelist, |
|
746 | fentries=filelist, | |
734 | dentries=dirlist, |
|
747 | dentries=dirlist, | |
735 | archives=self.archivelist(hex(node)), |
|
748 | archives=self.archivelist(hex(node)), | |
736 | tags=self.nodetagsdict(node), |
|
749 | tags=self.nodetagsdict(node), | |
737 | branches=self.nodebranchdict(ctx)) |
|
750 | branches=self.nodebranchdict(ctx)) | |
738 |
|
751 | |||
739 | def tags(self, tmpl): |
|
752 | def tags(self, tmpl): | |
740 | i = self.repo.tagslist() |
|
753 | i = self.repo.tagslist() | |
741 | i.reverse() |
|
754 | i.reverse() | |
742 | parity = paritygen(self.stripecount) |
|
755 | parity = paritygen(self.stripecount) | |
743 |
|
756 | |||
744 | def entries(notip=False,limit=0, **map): |
|
757 | def entries(notip=False,limit=0, **map): | |
745 | count = 0 |
|
758 | count = 0 | |
746 | for k, n in i: |
|
759 | for k, n in i: | |
747 | if notip and k == "tip": |
|
760 | if notip and k == "tip": | |
748 | continue |
|
761 | continue | |
749 | if limit > 0 and count >= limit: |
|
762 | if limit > 0 and count >= limit: | |
750 | continue |
|
763 | continue | |
751 | count = count + 1 |
|
764 | count = count + 1 | |
752 | yield {"parity": parity.next(), |
|
765 | yield {"parity": parity.next(), | |
753 | "tag": k, |
|
766 | "tag": k, | |
754 | "date": self.repo.changectx(n).date(), |
|
767 | "date": self.repo.changectx(n).date(), | |
755 | "node": hex(n)} |
|
768 | "node": hex(n)} | |
756 |
|
769 | |||
757 | return tmpl("tags", |
|
770 | return tmpl("tags", | |
758 | node=hex(self.repo.changelog.tip()), |
|
771 | node=hex(self.repo.changelog.tip()), | |
759 | entries=lambda **x: entries(False,0, **x), |
|
772 | entries=lambda **x: entries(False,0, **x), | |
760 | entriesnotip=lambda **x: entries(True,0, **x), |
|
773 | entriesnotip=lambda **x: entries(True,0, **x), | |
761 | latestentry=lambda **x: entries(True,1, **x)) |
|
774 | latestentry=lambda **x: entries(True,1, **x)) | |
762 |
|
775 | |||
763 | def summary(self, tmpl): |
|
776 | def summary(self, tmpl): | |
764 | i = self.repo.tagslist() |
|
777 | i = self.repo.tagslist() | |
765 | i.reverse() |
|
778 | i.reverse() | |
766 |
|
779 | |||
767 | def tagentries(**map): |
|
780 | def tagentries(**map): | |
768 | parity = paritygen(self.stripecount) |
|
781 | parity = paritygen(self.stripecount) | |
769 | count = 0 |
|
782 | count = 0 | |
770 | for k, n in i: |
|
783 | for k, n in i: | |
771 | if k == "tip": # skip tip |
|
784 | if k == "tip": # skip tip | |
772 | continue; |
|
785 | continue; | |
773 |
|
786 | |||
774 | count += 1 |
|
787 | count += 1 | |
775 | if count > 10: # limit to 10 tags |
|
788 | if count > 10: # limit to 10 tags | |
776 | break; |
|
789 | break; | |
777 |
|
790 | |||
778 | yield tmpl("tagentry", |
|
791 | yield tmpl("tagentry", | |
779 | parity=parity.next(), |
|
792 | parity=parity.next(), | |
780 | tag=k, |
|
793 | tag=k, | |
781 | node=hex(n), |
|
794 | node=hex(n), | |
782 | date=self.repo.changectx(n).date()) |
|
795 | date=self.repo.changectx(n).date()) | |
783 |
|
796 | |||
784 |
|
797 | |||
785 | def branches(**map): |
|
798 | def branches(**map): | |
786 | parity = paritygen(self.stripecount) |
|
799 | parity = paritygen(self.stripecount) | |
787 |
|
800 | |||
788 | b = self.repo.branchtags() |
|
801 | b = self.repo.branchtags() | |
789 | l = [(-self.repo.changelog.rev(n), n, t) for t, n in b.items()] |
|
802 | l = [(-self.repo.changelog.rev(n), n, t) for t, n in b.items()] | |
790 | l.sort() |
|
803 | l.sort() | |
791 |
|
804 | |||
792 | for r,n,t in l: |
|
805 | for r,n,t in l: | |
793 | ctx = self.repo.changectx(n) |
|
806 | ctx = self.repo.changectx(n) | |
794 |
|
807 | |||
795 | yield {'parity': parity.next(), |
|
808 | yield {'parity': parity.next(), | |
796 | 'branch': t, |
|
809 | 'branch': t, | |
797 | 'node': hex(n), |
|
810 | 'node': hex(n), | |
798 | 'date': ctx.date()} |
|
811 | 'date': ctx.date()} | |
799 |
|
812 | |||
800 | def changelist(**map): |
|
813 | def changelist(**map): | |
801 | parity = paritygen(self.stripecount, offset=start-end) |
|
814 | parity = paritygen(self.stripecount, offset=start-end) | |
802 | l = [] # build a list in forward order for efficiency |
|
815 | l = [] # build a list in forward order for efficiency | |
803 | for i in xrange(start, end): |
|
816 | for i in xrange(start, end): | |
804 | ctx = self.repo.changectx(i) |
|
817 | ctx = self.repo.changectx(i) | |
805 | n = ctx.node() |
|
818 | n = ctx.node() | |
806 | hn = hex(n) |
|
819 | hn = hex(n) | |
807 |
|
820 | |||
808 | l.insert(0, tmpl( |
|
821 | l.insert(0, tmpl( | |
809 | 'shortlogentry', |
|
822 | 'shortlogentry', | |
810 | parity=parity.next(), |
|
823 | parity=parity.next(), | |
811 | author=ctx.user(), |
|
824 | author=ctx.user(), | |
812 | desc=ctx.description(), |
|
825 | desc=ctx.description(), | |
813 | date=ctx.date(), |
|
826 | date=ctx.date(), | |
814 | rev=i, |
|
827 | rev=i, | |
815 | node=hn, |
|
828 | node=hn, | |
816 | tags=self.nodetagsdict(n), |
|
829 | tags=self.nodetagsdict(n), | |
817 | branches=self.nodebranchdict(ctx))) |
|
830 | branches=self.nodebranchdict(ctx))) | |
818 |
|
831 | |||
819 | yield l |
|
832 | yield l | |
820 |
|
833 | |||
821 | cl = self.repo.changelog |
|
834 | cl = self.repo.changelog | |
822 | count = cl.count() |
|
835 | count = cl.count() | |
823 | start = max(0, count - self.maxchanges) |
|
836 | start = max(0, count - self.maxchanges) | |
824 | end = min(count, start + self.maxchanges) |
|
837 | end = min(count, start + self.maxchanges) | |
825 |
|
838 | |||
826 | return tmpl("summary", |
|
839 | return tmpl("summary", | |
827 | desc=self.config("web", "description", "unknown"), |
|
840 | desc=self.config("web", "description", "unknown"), | |
828 | owner=get_contact(self.config) or "unknown", |
|
841 | owner=get_contact(self.config) or "unknown", | |
829 | lastchange=cl.read(cl.tip())[2], |
|
842 | lastchange=cl.read(cl.tip())[2], | |
830 | tags=tagentries, |
|
843 | tags=tagentries, | |
831 | branches=branches, |
|
844 | branches=branches, | |
832 | shortlog=changelist, |
|
845 | shortlog=changelist, | |
833 | node=hex(cl.tip()), |
|
846 | node=hex(cl.tip()), | |
834 | archives=self.archivelist("tip")) |
|
847 | archives=self.archivelist("tip")) | |
835 |
|
848 | |||
def filediff(self, tmpl, fctx):
    """Render the 'filediff' template: the diff of one file against
    its first parent at the given file revision."""
    fnode = fctx.node()
    fpath = fctx.path()
    fparents = fctx.parents()
    # Diff against the first parent, or the null revision for a new file.
    base = fparents and fparents[0].node() or nullid

    def diff(**map):
        yield self.diff(tmpl, base, fnode, [fpath])

    return tmpl("filediff",
                file=fpath,
                node=hex(fnode),
                rev=fctx.rev(),
                parent=self.siblings(fparents),
                child=self.siblings(fctx.children()),
                diff=diff)
852 |
|
865 | |||
# Archive download formats: type key -> (MIME type, archival format
# name, filename extension, Content-Encoding header value or None).
archive_specs = {
    'bz2': ('application/x-tar', 'tbz2', '.tar.bz2', None),
    'gz': ('application/x-tar', 'tgz', '.tar.gz', None),
    'zip': ('application/zip', 'zip', '.zip', None),
}
858 |
|
871 | |||
def archive(self, tmpl, req, key, type_):
    """Stream an archive (tar.gz, tar.bz2 or zip) of revision *key*
    as an attachment download."""
    # Sanitize the repository name for use in a filename.
    reponame = re.sub(r"\W+", "-", os.path.basename(self.reponame))
    cnode = self.repo.lookup(key)
    # Use a short node hash in the name when the key is a raw node
    # or 'tip'; otherwise keep the symbolic name (tag/branch).
    arch_version = key
    if cnode == key or key == 'tip':
        arch_version = short(cnode)
    name = "%s-%s" % (reponame, arch_version)
    mimetype, artype, extension, encoding = self.archive_specs[type_]
    headers = [
        ('Content-Type', mimetype),
        ('Content-Disposition', 'attachment; filename=%s%s' %
            (name, extension))
    ]
    if encoding:
        headers.append(('Content-Encoding', encoding))
    req.header(headers)
    req.respond(HTTP_OK)
    archival.archive(self.repo, req, cnode, artype, prefix=name)
877 |
|
890 | |||
878 | # add tags to things |
|
891 | # add tags to things | |
879 | # tags -> list of changesets corresponding to tags |
|
892 | # tags -> list of changesets corresponding to tags | |
880 | # find tag, changeset, file |
|
893 | # find tag, changeset, file | |
881 |
|
894 | |||
def cleanpath(self, path):
    """Return *path* canonicalized relative to the repository root,
    with any leading slashes stripped (rejects escapes from the repo)."""
    return util.canonpath(self.repo.root, '', path.lstrip('/'))
885 |
|
898 | |||
def changectx(self, req):
    """Resolve the change context addressed by the request.

    Looks at the 'node' then 'manifest' form parameters, defaulting
    to the tip revision.  If the id is not a changeset, fall back to
    treating it as a manifest node and map it to its changeset.
    """
    if 'node' in req.form:
        changeid = req.form['node'][0]
    elif 'manifest' in req.form:
        changeid = req.form['manifest'][0]
    else:
        # default to tip
        changeid = self.repo.changelog.count() - 1

    try:
        return self.repo.changectx(changeid)
    except hg.RepoError:
        # maybe it was a manifest node: resolve via its linked changeset
        man = self.repo.manifest
        mn = man.lookup(changeid)
        return self.repo.changectx(man.linkrev(mn))
902 |
|
915 | |||
def filectx(self, req):
    """Resolve the file context addressed by the request.

    The 'file' parameter names the path; the revision comes from
    'node' (changeset id) or 'filenode' (filelog id).
    """
    path = self.cleanpath(req.form['file'][0])
    if 'node' in req.form:
        changeid = req.form['node'][0]
    else:
        changeid = req.form['filenode'][0]
    try:
        # try the id as a changeset first
        fctx = self.repo.changectx(changeid).filectx(path)
    except hg.RepoError:
        # fall back to treating it as a filelog node
        fctx = self.repo.filectx(path, fileid=changeid)

    return fctx
916 |
|
929 | |||
def check_perm(self, req, op, default):
    '''check permission for operation based on user auth.
    return true if op allowed, else false.
    default is policy to use if no config given.'''

    user = req.env.get('REMOTE_USER')

    # an explicit deny list wins: '*' or a listed user is rejected,
    # as is an anonymous user when any deny list exists
    deny = self.configlist('web', 'deny_' + op)
    if deny and (not user or deny == ['*'] or user in deny):
        return False

    # then the allow list; with no allow list configured, fall back
    # to the caller-supplied default policy
    allow = self.configlist('web', 'allow_' + op)
    return (allow and (allow == ['*'] or user in allow)) or default
@@ -1,250 +1,243 b'' | |||||
1 | # |
|
1 | # | |
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms |
|
5 | # This software may be used and distributed according to the terms | |
6 | # of the GNU General Public License, incorporated herein by reference. |
|
6 | # of the GNU General Public License, incorporated herein by reference. | |
7 |
|
7 | |||
8 | import cStringIO, zlib, bz2, tempfile, errno, os, sys |
|
8 | import cStringIO, zlib, bz2, tempfile, errno, os, sys | |
9 | from mercurial import util, streamclone |
|
9 | from mercurial import util, streamclone | |
10 | from mercurial.i18n import gettext as _ |
|
10 | from mercurial.i18n import gettext as _ | |
11 | from mercurial.node import * |
|
11 | from mercurial.node import * | |
12 | from common import HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR |
|
12 | from common import HTTP_OK, HTTP_NOT_FOUND, HTTP_SERVER_ERROR | |
13 |
|
13 | |||
14 | # __all__ is populated with the allowed commands. Be sure to add to it if |
|
14 | # __all__ is populated with the allowed commands. Be sure to add to it if | |
15 | # you're adding a new command, or the new command won't work. |
|
15 | # you're adding a new command, or the new command won't work. | |
16 |
|
16 | |||
17 | __all__ = [ |
|
17 | __all__ = [ | |
18 | 'lookup', 'heads', 'branches', 'between', 'changegroup', |
|
18 | 'lookup', 'heads', 'branches', 'between', 'changegroup', | |
19 | 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out', |
|
19 | 'changegroupsubset', 'capabilities', 'unbundle', 'stream_out', | |
20 | ] |
|
20 | ] | |
21 |
|
21 | |||
22 | HGTYPE = 'application/mercurial-0.1' |
|
22 | HGTYPE = 'application/mercurial-0.1' | |
23 |
|
23 | |||
24 | def lookup(web, req): |
|
24 | def lookup(web, req): | |
25 | try: |
|
25 | try: | |
26 | r = hex(web.repo.lookup(req.form['key'][0])) |
|
26 | r = hex(web.repo.lookup(req.form['key'][0])) | |
27 | success = 1 |
|
27 | success = 1 | |
28 | except Exception,inst: |
|
28 | except Exception,inst: | |
29 | r = str(inst) |
|
29 | r = str(inst) | |
30 | success = 0 |
|
30 | success = 0 | |
31 | resp = "%s %s\n" % (success, r) |
|
31 | resp = "%s %s\n" % (success, r) | |
32 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) |
|
32 | req.respond(HTTP_OK, HGTYPE, length=len(resp)) | |
33 | req.write(resp) |
|
33 | req.write(resp) | |
34 |
|
34 | |||
def heads(web, req):
    """Reply with the repository heads as space-separated hex nodes."""
    payload = " ".join(map(hex, web.repo.heads())) + "\n"
    req.respond(HTTP_OK, HGTYPE, length=len(payload))
    req.write(payload)
39 |
|
39 | |||
def branches(web, req):
    """Reply with branch information for the requested nodes.

    One line per node: space-separated hex nodes as produced by
    repo.branches().
    """
    nodes = []
    if 'nodes' in req.form:
        nodes = map(bin, req.form['nodes'][0].split(" "))
    buf = cStringIO.StringIO()
    for b in web.repo.branches(nodes):
        buf.write(" ".join(map(hex, b)) + "\n")
    resp = buf.getvalue()
    req.respond(HTTP_OK, HGTYPE, length=len(resp))
    req.write(resp)
50 |
|
50 | |||
def between(web, req):
    """Reply with the nodes between the requested node pairs.

    The 'pairs' form parameter is a space-separated list of
    "<hexnode>-<hexnode>" pairs; one response line per pair.
    """
    # Initialize before the guard: without this, a request missing the
    # 'pairs' parameter raised NameError below (cf. branches()).
    pairs = []
    if 'pairs' in req.form:
        pairs = [map(bin, p.split("-"))
                 for p in req.form['pairs'][0].split(" ")]
    buf = cStringIO.StringIO()
    for b in web.repo.between(pairs):
        buf.write(" ".join(map(hex, b)) + "\n")
    resp = buf.getvalue()
    req.respond(HTTP_OK, HGTYPE, length=len(resp))
    req.write(resp)
61 |
|
61 | |||
def changegroup(web, req):
    """Stream a zlib-compressed changegroup rooted at the given nodes.

    Silently returns an empty body when pulling is disabled.
    """
    req.respond(HTTP_OK, HGTYPE)
    roots = []
    if not web.allowpull:
        return

    if 'roots' in req.form:
        roots = map(bin, req.form['roots'][0].split(" "))

    compressor = zlib.compressobj()
    cg = web.repo.changegroup(roots, 'serve')
    while True:
        data = cg.read(4096)
        if not data:
            break
        req.write(compressor.compress(data))

    req.write(compressor.flush())
80 |
|
80 | |||
def changegroupsubset(web, req):
    """Stream a zlib-compressed changegroup limited to the subgraph
    between the given bases and heads.

    Silently returns an empty body when pulling is disabled.
    """
    req.respond(HTTP_OK, HGTYPE)
    bases = []
    heads = []
    if not web.allowpull:
        return

    if 'bases' in req.form:
        bases = [bin(x) for x in req.form['bases'][0].split(' ')]
    if 'heads' in req.form:
        heads = [bin(x) for x in req.form['heads'][0].split(' ')]

    compressor = zlib.compressobj()
    cg = web.repo.changegroupsubset(bases, heads, 'serve')
    while True:
        data = cg.read(4096)
        if not data:
            break
        req.write(compressor.compress(data))

    req.write(compressor.flush())
102 |
|
102 | |||
def capabilities(web, req):
    """Reply with the server's capability list, space-separated."""
    caps = ' '.join(web.capabilities())
    req.respond(HTTP_OK, HGTYPE, length=len(caps))
    req.write(caps)
114 |
|
107 | |||
115 | def unbundle(web, req): |
|
108 | def unbundle(web, req): | |
116 | def bail(response, headers={}): |
|
109 | def bail(response, headers={}): | |
117 | length = int(req.env['CONTENT_LENGTH']) |
|
110 | length = int(req.env['CONTENT_LENGTH']) | |
118 | for s in util.filechunkiter(req, limit=length): |
|
111 | for s in util.filechunkiter(req, limit=length): | |
119 | # drain incoming bundle, else client will not see |
|
112 | # drain incoming bundle, else client will not see | |
120 | # response when run outside cgi script |
|
113 | # response when run outside cgi script | |
121 | pass |
|
114 | pass | |
122 | req.header(headers.items()) |
|
115 | req.header(headers.items()) | |
123 | req.respond(HTTP_OK, HGTYPE) |
|
116 | req.respond(HTTP_OK, HGTYPE) | |
124 | req.write('0\n') |
|
117 | req.write('0\n') | |
125 | req.write(response) |
|
118 | req.write(response) | |
126 |
|
119 | |||
127 | # require ssl by default, auth info cannot be sniffed and |
|
120 | # require ssl by default, auth info cannot be sniffed and | |
128 | # replayed |
|
121 | # replayed | |
129 | ssl_req = web.configbool('web', 'push_ssl', True) |
|
122 | ssl_req = web.configbool('web', 'push_ssl', True) | |
130 | if ssl_req: |
|
123 | if ssl_req: | |
131 | if req.env.get('wsgi.url_scheme') != 'https': |
|
124 | if req.env.get('wsgi.url_scheme') != 'https': | |
132 | bail(_('ssl required\n')) |
|
125 | bail(_('ssl required\n')) | |
133 | return |
|
126 | return | |
134 | proto = 'https' |
|
127 | proto = 'https' | |
135 | else: |
|
128 | else: | |
136 | proto = 'http' |
|
129 | proto = 'http' | |
137 |
|
130 | |||
138 | # do not allow push unless explicitly allowed |
|
131 | # do not allow push unless explicitly allowed | |
139 | if not web.check_perm(req, 'push', False): |
|
132 | if not web.check_perm(req, 'push', False): | |
140 | bail(_('push not authorized\n'), |
|
133 | bail(_('push not authorized\n'), | |
141 | headers={'status': '401 Unauthorized'}) |
|
134 | headers={'status': '401 Unauthorized'}) | |
142 | return |
|
135 | return | |
143 |
|
136 | |||
144 | their_heads = req.form['heads'][0].split(' ') |
|
137 | their_heads = req.form['heads'][0].split(' ') | |
145 |
|
138 | |||
146 | def check_heads(): |
|
139 | def check_heads(): | |
147 | heads = map(hex, web.repo.heads()) |
|
140 | heads = map(hex, web.repo.heads()) | |
148 | return their_heads == [hex('force')] or their_heads == heads |
|
141 | return their_heads == [hex('force')] or their_heads == heads | |
149 |
|
142 | |||
150 | # fail early if possible |
|
143 | # fail early if possible | |
151 | if not check_heads(): |
|
144 | if not check_heads(): | |
152 | bail(_('unsynced changes\n')) |
|
145 | bail(_('unsynced changes\n')) | |
153 | return |
|
146 | return | |
154 |
|
147 | |||
155 | req.respond(HTTP_OK, HGTYPE) |
|
148 | req.respond(HTTP_OK, HGTYPE) | |
156 |
|
149 | |||
157 | # do not lock repo until all changegroup data is |
|
150 | # do not lock repo until all changegroup data is | |
158 | # streamed. save to temporary file. |
|
151 | # streamed. save to temporary file. | |
159 |
|
152 | |||
160 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') |
|
153 | fd, tempname = tempfile.mkstemp(prefix='hg-unbundle-') | |
161 | fp = os.fdopen(fd, 'wb+') |
|
154 | fp = os.fdopen(fd, 'wb+') | |
162 | try: |
|
155 | try: | |
163 | length = int(req.env['CONTENT_LENGTH']) |
|
156 | length = int(req.env['CONTENT_LENGTH']) | |
164 | for s in util.filechunkiter(req, limit=length): |
|
157 | for s in util.filechunkiter(req, limit=length): | |
165 | fp.write(s) |
|
158 | fp.write(s) | |
166 |
|
159 | |||
167 | try: |
|
160 | try: | |
168 | lock = web.repo.lock() |
|
161 | lock = web.repo.lock() | |
169 | try: |
|
162 | try: | |
170 | if not check_heads(): |
|
163 | if not check_heads(): | |
171 | req.write('0\n') |
|
164 | req.write('0\n') | |
172 | req.write(_('unsynced changes\n')) |
|
165 | req.write(_('unsynced changes\n')) | |
173 | return |
|
166 | return | |
174 |
|
167 | |||
175 | fp.seek(0) |
|
168 | fp.seek(0) | |
176 | header = fp.read(6) |
|
169 | header = fp.read(6) | |
177 | if not header.startswith("HG"): |
|
170 | if not header.startswith("HG"): | |
178 | # old client with uncompressed bundle |
|
171 | # old client with uncompressed bundle | |
179 | def generator(f): |
|
172 | def generator(f): | |
180 | yield header |
|
173 | yield header | |
181 | for chunk in f: |
|
174 | for chunk in f: | |
182 | yield chunk |
|
175 | yield chunk | |
183 | elif not header.startswith("HG10"): |
|
176 | elif not header.startswith("HG10"): | |
184 | req.write("0\n") |
|
177 | req.write("0\n") | |
185 | req.write(_("unknown bundle version\n")) |
|
178 | req.write(_("unknown bundle version\n")) | |
186 | return |
|
179 | return | |
187 | elif header == "HG10GZ": |
|
180 | elif header == "HG10GZ": | |
188 | def generator(f): |
|
181 | def generator(f): | |
189 | zd = zlib.decompressobj() |
|
182 | zd = zlib.decompressobj() | |
190 | for chunk in f: |
|
183 | for chunk in f: | |
191 | yield zd.decompress(chunk) |
|
184 | yield zd.decompress(chunk) | |
192 | elif header == "HG10BZ": |
|
185 | elif header == "HG10BZ": | |
193 | def generator(f): |
|
186 | def generator(f): | |
194 | zd = bz2.BZ2Decompressor() |
|
187 | zd = bz2.BZ2Decompressor() | |
195 | zd.decompress("BZ") |
|
188 | zd.decompress("BZ") | |
196 | for chunk in f: |
|
189 | for chunk in f: | |
197 | yield zd.decompress(chunk) |
|
190 | yield zd.decompress(chunk) | |
198 | elif header == "HG10UN": |
|
191 | elif header == "HG10UN": | |
199 | def generator(f): |
|
192 | def generator(f): | |
200 | for chunk in f: |
|
193 | for chunk in f: | |
201 | yield chunk |
|
194 | yield chunk | |
202 | else: |
|
195 | else: | |
203 | req.write("0\n") |
|
196 | req.write("0\n") | |
204 | req.write(_("unknown bundle compression type\n")) |
|
197 | req.write(_("unknown bundle compression type\n")) | |
205 | return |
|
198 | return | |
206 | gen = generator(util.filechunkiter(fp, 4096)) |
|
199 | gen = generator(util.filechunkiter(fp, 4096)) | |
207 |
|
200 | |||
208 | # send addchangegroup output to client |
|
201 | # send addchangegroup output to client | |
209 |
|
202 | |||
210 | old_stdout = sys.stdout |
|
203 | old_stdout = sys.stdout | |
211 | sys.stdout = cStringIO.StringIO() |
|
204 | sys.stdout = cStringIO.StringIO() | |
212 |
|
205 | |||
213 | try: |
|
206 | try: | |
214 | url = 'remote:%s:%s' % (proto, |
|
207 | url = 'remote:%s:%s' % (proto, | |
215 | req.env.get('REMOTE_HOST', '')) |
|
208 | req.env.get('REMOTE_HOST', '')) | |
216 | try: |
|
209 | try: | |
217 | ret = web.repo.addchangegroup( |
|
210 | ret = web.repo.addchangegroup( | |
218 | util.chunkbuffer(gen), 'serve', url) |
|
211 | util.chunkbuffer(gen), 'serve', url) | |
219 | except util.Abort, inst: |
|
212 | except util.Abort, inst: | |
220 | sys.stdout.write("abort: %s\n" % inst) |
|
213 | sys.stdout.write("abort: %s\n" % inst) | |
221 | ret = 0 |
|
214 | ret = 0 | |
222 | finally: |
|
215 | finally: | |
223 | val = sys.stdout.getvalue() |
|
216 | val = sys.stdout.getvalue() | |
224 | sys.stdout = old_stdout |
|
217 | sys.stdout = old_stdout | |
225 | req.write('%d\n' % ret) |
|
218 | req.write('%d\n' % ret) | |
226 | req.write(val) |
|
219 | req.write(val) | |
227 | finally: |
|
220 | finally: | |
228 | del lock |
|
221 | del lock | |
229 | except (OSError, IOError), inst: |
|
222 | except (OSError, IOError), inst: | |
230 | req.write('0\n') |
|
223 | req.write('0\n') | |
231 | filename = getattr(inst, 'filename', '') |
|
224 | filename = getattr(inst, 'filename', '') | |
232 | # Don't send our filesystem layout to the client |
|
225 | # Don't send our filesystem layout to the client | |
233 | if filename.startswith(web.repo.root): |
|
226 | if filename.startswith(web.repo.root): | |
234 | filename = filename[len(web.repo.root)+1:] |
|
227 | filename = filename[len(web.repo.root)+1:] | |
235 | else: |
|
228 | else: | |
236 | filename = '' |
|
229 | filename = '' | |
237 | error = getattr(inst, 'strerror', 'Unknown error') |
|
230 | error = getattr(inst, 'strerror', 'Unknown error') | |
238 | if inst.errno == errno.ENOENT: |
|
231 | if inst.errno == errno.ENOENT: | |
239 | code = HTTP_NOT_FOUND |
|
232 | code = HTTP_NOT_FOUND | |
240 | else: |
|
233 | else: | |
241 | code = HTTP_SERVER_ERROR |
|
234 | code = HTTP_SERVER_ERROR | |
242 | req.respond(code) |
|
235 | req.respond(code) | |
243 | req.write('%s: %s\n' % (error, filename)) |
|
236 | req.write('%s: %s\n' % (error, filename)) | |
244 | finally: |
|
237 | finally: | |
245 | fp.close() |
|
238 | fp.close() | |
246 | os.unlink(tempname) |
|
239 | os.unlink(tempname) | |
247 |
|
240 | |||
def stream_out(web, req):
    """Stream raw revlog data for a streaming clone (untrusted config)."""
    req.respond(HTTP_OK, HGTYPE)
    streamclone.stream_out(web.repo, req, untrusted=True)
General Comments 0
You need to be logged in to leave comments.
Login now