Show More
@@ -1,365 +1,342 | |||||
1 | # archival.py - revision archival for mercurial |
|
1 | # archival.py - revision archival for mercurial | |
2 | # |
|
2 | # | |
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> |
|
3 | # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import gzip |
|
10 | import gzip | |
11 | import os |
|
11 | import os | |
12 | import struct |
|
12 | import struct | |
13 | import tarfile |
|
13 | import tarfile | |
14 | import time |
|
14 | import time | |
15 | import zipfile |
|
15 | import zipfile | |
16 | import zlib |
|
16 | import zlib | |
17 |
|
17 | |||
18 | from .i18n import _ |
|
18 | from .i18n import _ | |
19 |
|
19 | |||
20 | from . import ( |
|
20 | from . import ( | |
21 | error, |
|
21 | error, | |
22 | formatter, |
|
22 | formatter, | |
23 | match as matchmod, |
|
23 | match as matchmod, | |
24 | pycompat, |
|
24 | pycompat, | |
25 | scmutil, |
|
25 | scmutil, | |
26 | util, |
|
26 | util, | |
27 | vfs as vfsmod, |
|
27 | vfs as vfsmod, | |
28 | ) |
|
28 | ) | |
29 | stringio = util.stringio |
|
29 | stringio = util.stringio | |
30 |
|
30 | |||
31 | # from unzip source code: |
|
31 | # from unzip source code: | |
32 | _UNX_IFREG = 0x8000 |
|
32 | _UNX_IFREG = 0x8000 | |
33 | _UNX_IFLNK = 0xa000 |
|
33 | _UNX_IFLNK = 0xa000 | |
34 |
|
34 | |||
35 | def tidyprefix(dest, kind, prefix): |
|
35 | def tidyprefix(dest, kind, prefix): | |
36 | '''choose prefix to use for names in archive. make sure prefix is |
|
36 | '''choose prefix to use for names in archive. make sure prefix is | |
37 | safe for consumers.''' |
|
37 | safe for consumers.''' | |
38 |
|
38 | |||
39 | if prefix: |
|
39 | if prefix: | |
40 | prefix = util.normpath(prefix) |
|
40 | prefix = util.normpath(prefix) | |
41 | else: |
|
41 | else: | |
42 | if not isinstance(dest, bytes): |
|
42 | if not isinstance(dest, bytes): | |
43 | raise ValueError('dest must be string if no prefix') |
|
43 | raise ValueError('dest must be string if no prefix') | |
44 | prefix = os.path.basename(dest) |
|
44 | prefix = os.path.basename(dest) | |
45 | lower = prefix.lower() |
|
45 | lower = prefix.lower() | |
46 | for sfx in exts.get(kind, []): |
|
46 | for sfx in exts.get(kind, []): | |
47 | if lower.endswith(sfx): |
|
47 | if lower.endswith(sfx): | |
48 | prefix = prefix[:-len(sfx)] |
|
48 | prefix = prefix[:-len(sfx)] | |
49 | break |
|
49 | break | |
50 | lpfx = os.path.normpath(util.localpath(prefix)) |
|
50 | lpfx = os.path.normpath(util.localpath(prefix)) | |
51 | prefix = util.pconvert(lpfx) |
|
51 | prefix = util.pconvert(lpfx) | |
52 | if not prefix.endswith('/'): |
|
52 | if not prefix.endswith('/'): | |
53 | prefix += '/' |
|
53 | prefix += '/' | |
54 | # Drop the leading '.' path component if present, so Windows can read the |
|
54 | # Drop the leading '.' path component if present, so Windows can read the | |
55 | # zip files (issue4634) |
|
55 | # zip files (issue4634) | |
56 | if prefix.startswith('./'): |
|
56 | if prefix.startswith('./'): | |
57 | prefix = prefix[2:] |
|
57 | prefix = prefix[2:] | |
58 | if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: |
|
58 | if prefix.startswith('../') or os.path.isabs(lpfx) or '/../' in prefix: | |
59 | raise error.Abort(_('archive prefix contains illegal components')) |
|
59 | raise error.Abort(_('archive prefix contains illegal components')) | |
60 | return prefix |
|
60 | return prefix | |
61 |
|
61 | |||
62 | exts = { |
|
62 | exts = { | |
63 | 'tar': ['.tar'], |
|
63 | 'tar': ['.tar'], | |
64 | 'tbz2': ['.tbz2', '.tar.bz2'], |
|
64 | 'tbz2': ['.tbz2', '.tar.bz2'], | |
65 | 'tgz': ['.tgz', '.tar.gz'], |
|
65 | 'tgz': ['.tgz', '.tar.gz'], | |
66 | 'zip': ['.zip'], |
|
66 | 'zip': ['.zip'], | |
67 | } |
|
67 | } | |
68 |
|
68 | |||
69 | def guesskind(dest): |
|
69 | def guesskind(dest): | |
70 | for kind, extensions in exts.iteritems(): |
|
70 | for kind, extensions in exts.iteritems(): | |
71 | if any(dest.endswith(ext) for ext in extensions): |
|
71 | if any(dest.endswith(ext) for ext in extensions): | |
72 | return kind |
|
72 | return kind | |
73 | return None |
|
73 | return None | |
74 |
|
74 | |||
75 | def _rootctx(repo): |
|
75 | def _rootctx(repo): | |
76 | # repo[0] may be hidden |
|
76 | # repo[0] may be hidden | |
77 | for rev in repo: |
|
77 | for rev in repo: | |
78 | return repo[rev] |
|
78 | return repo[rev] | |
79 | return repo['null'] |
|
79 | return repo['null'] | |
80 |
|
80 | |||
81 | # {tags} on ctx includes local tags and 'tip', with no current way to limit |
|
81 | # {tags} on ctx includes local tags and 'tip', with no current way to limit | |
82 | # that to global tags. Therefore, use {latesttag} as a substitute when |
|
82 | # that to global tags. Therefore, use {latesttag} as a substitute when | |
83 | # the distance is 0, since that will be the list of global tags on ctx. |
|
83 | # the distance is 0, since that will be the list of global tags on ctx. | |
84 | _defaultmetatemplate = br''' |
|
84 | _defaultmetatemplate = br''' | |
85 | repo: {root} |
|
85 | repo: {root} | |
86 | node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")} |
|
86 | node: {ifcontains(rev, revset("wdir()"), "{p1node}{dirty}", "{node}")} | |
87 | branch: {branch|utf8} |
|
87 | branch: {branch|utf8} | |
88 | {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"), |
|
88 | {ifeq(latesttagdistance, 0, join(latesttag % "tag: {tag}", "\n"), | |
89 | separate("\n", |
|
89 | separate("\n", | |
90 | join(latesttag % "latesttag: {tag}", "\n"), |
|
90 | join(latesttag % "latesttag: {tag}", "\n"), | |
91 | "latesttagdistance: {latesttagdistance}", |
|
91 | "latesttagdistance: {latesttagdistance}", | |
92 | "changessincelatesttag: {changessincelatesttag}"))} |
|
92 | "changessincelatesttag: {changessincelatesttag}"))} | |
93 | '''[1:] # drop leading '\n' |
|
93 | '''[1:] # drop leading '\n' | |
94 |
|
94 | |||
95 | def buildmetadata(ctx): |
|
95 | def buildmetadata(ctx): | |
96 | '''build content of .hg_archival.txt''' |
|
96 | '''build content of .hg_archival.txt''' | |
97 | repo = ctx.repo() |
|
97 | repo = ctx.repo() | |
98 |
|
98 | |||
99 | opts = { |
|
99 | opts = { | |
100 | 'template': repo.ui.config('experimental', 'archivemetatemplate', |
|
100 | 'template': repo.ui.config('experimental', 'archivemetatemplate', | |
101 | _defaultmetatemplate) |
|
101 | _defaultmetatemplate) | |
102 | } |
|
102 | } | |
103 |
|
103 | |||
104 | out = util.stringio() |
|
104 | out = util.stringio() | |
105 |
|
105 | |||
106 | fm = formatter.formatter(repo.ui, out, 'archive', opts) |
|
106 | fm = formatter.formatter(repo.ui, out, 'archive', opts) | |
107 | fm.startitem() |
|
107 | fm.startitem() | |
108 | fm.context(ctx=ctx) |
|
108 | fm.context(ctx=ctx) | |
109 | fm.data(root=_rootctx(repo).hex()) |
|
109 | fm.data(root=_rootctx(repo).hex()) | |
110 |
|
110 | |||
111 | if ctx.rev() is None: |
|
111 | if ctx.rev() is None: | |
112 | dirty = '' |
|
112 | dirty = '' | |
113 | if ctx.dirty(missing=True): |
|
113 | if ctx.dirty(missing=True): | |
114 | dirty = '+' |
|
114 | dirty = '+' | |
115 | fm.data(dirty=dirty) |
|
115 | fm.data(dirty=dirty) | |
116 | fm.end() |
|
116 | fm.end() | |
117 |
|
117 | |||
118 | return out.getvalue() |
|
118 | return out.getvalue() | |
119 |
|
119 | |||
120 | class tarit(object): |
|
120 | class tarit(object): | |
121 | '''write archive to tar file or stream. can write uncompressed, |
|
121 | '''write archive to tar file or stream. can write uncompressed, | |
122 | or compress with gzip or bzip2.''' |
|
122 | or compress with gzip or bzip2.''' | |
123 |
|
123 | |||
124 | class GzipFileWithTime(gzip.GzipFile): |
|
124 | class GzipFileWithTime(gzip.GzipFile): | |
125 |
|
125 | |||
126 | def __init__(self, *args, **kw): |
|
126 | def __init__(self, *args, **kw): | |
127 | timestamp = None |
|
127 | timestamp = None | |
128 | if r'timestamp' in kw: |
|
128 | if r'timestamp' in kw: | |
129 | timestamp = kw.pop(r'timestamp') |
|
129 | timestamp = kw.pop(r'timestamp') | |
130 | if timestamp is None: |
|
130 | if timestamp is None: | |
131 | self.timestamp = time.time() |
|
131 | self.timestamp = time.time() | |
132 | else: |
|
132 | else: | |
133 | self.timestamp = timestamp |
|
133 | self.timestamp = timestamp | |
134 | gzip.GzipFile.__init__(self, *args, **kw) |
|
134 | gzip.GzipFile.__init__(self, *args, **kw) | |
135 |
|
135 | |||
136 | def _write_gzip_header(self): |
|
136 | def _write_gzip_header(self): | |
137 | self.fileobj.write('\037\213') # magic header |
|
137 | self.fileobj.write('\037\213') # magic header | |
138 | self.fileobj.write('\010') # compression method |
|
138 | self.fileobj.write('\010') # compression method | |
139 | fname = self.name |
|
139 | fname = self.name | |
140 | if fname and fname.endswith('.gz'): |
|
140 | if fname and fname.endswith('.gz'): | |
141 | fname = fname[:-3] |
|
141 | fname = fname[:-3] | |
142 | flags = 0 |
|
142 | flags = 0 | |
143 | if fname: |
|
143 | if fname: | |
144 | flags = gzip.FNAME |
|
144 | flags = gzip.FNAME | |
145 | self.fileobj.write(pycompat.bytechr(flags)) |
|
145 | self.fileobj.write(pycompat.bytechr(flags)) | |
146 | gzip.write32u(self.fileobj, int(self.timestamp)) |
|
146 | gzip.write32u(self.fileobj, int(self.timestamp)) | |
147 | self.fileobj.write('\002') |
|
147 | self.fileobj.write('\002') | |
148 | self.fileobj.write('\377') |
|
148 | self.fileobj.write('\377') | |
149 | if fname: |
|
149 | if fname: | |
150 | self.fileobj.write(fname + '\000') |
|
150 | self.fileobj.write(fname + '\000') | |
151 |
|
151 | |||
152 | def __init__(self, dest, mtime, kind=''): |
|
152 | def __init__(self, dest, mtime, kind=''): | |
153 | self.mtime = mtime |
|
153 | self.mtime = mtime | |
154 | self.fileobj = None |
|
154 | self.fileobj = None | |
155 |
|
155 | |||
156 | def taropen(mode, name='', fileobj=None): |
|
156 | def taropen(mode, name='', fileobj=None): | |
157 | if kind == 'gz': |
|
157 | if kind == 'gz': | |
158 | mode = mode[0:1] |
|
158 | mode = mode[0:1] | |
159 | if not fileobj: |
|
159 | if not fileobj: | |
160 | fileobj = open(name, mode + 'b') |
|
160 | fileobj = open(name, mode + 'b') | |
161 | gzfileobj = self.GzipFileWithTime(name, |
|
161 | gzfileobj = self.GzipFileWithTime(name, | |
162 | pycompat.sysstr(mode + 'b'), |
|
162 | pycompat.sysstr(mode + 'b'), | |
163 | zlib.Z_BEST_COMPRESSION, |
|
163 | zlib.Z_BEST_COMPRESSION, | |
164 | fileobj, timestamp=mtime) |
|
164 | fileobj, timestamp=mtime) | |
165 | self.fileobj = gzfileobj |
|
165 | self.fileobj = gzfileobj | |
166 | return tarfile.TarFile.taropen( |
|
166 | return tarfile.TarFile.taropen( | |
167 | name, pycompat.sysstr(mode), gzfileobj) |
|
167 | name, pycompat.sysstr(mode), gzfileobj) | |
168 | else: |
|
168 | else: | |
169 | return tarfile.open( |
|
169 | return tarfile.open( | |
170 | name, pycompat.sysstr(mode + kind), fileobj) |
|
170 | name, pycompat.sysstr(mode + kind), fileobj) | |
171 |
|
171 | |||
172 | if isinstance(dest, bytes): |
|
172 | if isinstance(dest, bytes): | |
173 | self.z = taropen('w:', name=dest) |
|
173 | self.z = taropen('w:', name=dest) | |
174 | else: |
|
174 | else: | |
175 | self.z = taropen('w|', fileobj=dest) |
|
175 | self.z = taropen('w|', fileobj=dest) | |
176 |
|
176 | |||
177 | def addfile(self, name, mode, islink, data): |
|
177 | def addfile(self, name, mode, islink, data): | |
178 | name = pycompat.fsdecode(name) |
|
178 | name = pycompat.fsdecode(name) | |
179 | i = tarfile.TarInfo(name) |
|
179 | i = tarfile.TarInfo(name) | |
180 | i.mtime = self.mtime |
|
180 | i.mtime = self.mtime | |
181 | i.size = len(data) |
|
181 | i.size = len(data) | |
182 | if islink: |
|
182 | if islink: | |
183 | i.type = tarfile.SYMTYPE |
|
183 | i.type = tarfile.SYMTYPE | |
184 | i.mode = 0o777 |
|
184 | i.mode = 0o777 | |
185 | i.linkname = pycompat.fsdecode(data) |
|
185 | i.linkname = pycompat.fsdecode(data) | |
186 | data = None |
|
186 | data = None | |
187 | i.size = 0 |
|
187 | i.size = 0 | |
188 | else: |
|
188 | else: | |
189 | i.mode = mode |
|
189 | i.mode = mode | |
190 | data = stringio(data) |
|
190 | data = stringio(data) | |
191 | self.z.addfile(i, data) |
|
191 | self.z.addfile(i, data) | |
192 |
|
192 | |||
193 | def done(self): |
|
193 | def done(self): | |
194 | self.z.close() |
|
194 | self.z.close() | |
195 | if self.fileobj: |
|
195 | if self.fileobj: | |
196 | self.fileobj.close() |
|
196 | self.fileobj.close() | |
197 |
|
197 | |||
198 | class tellable(object): |
|
|||
199 | '''provide tell method for zipfile.ZipFile when writing to http |
|
|||
200 | response file object.''' |
|
|||
201 |
|
||||
202 | def __init__(self, fp): |
|
|||
203 | self.fp = fp |
|
|||
204 | self.offset = 0 |
|
|||
205 |
|
||||
206 | def __getattr__(self, key): |
|
|||
207 | return getattr(self.fp, key) |
|
|||
208 |
|
||||
209 | def write(self, s): |
|
|||
210 | self.fp.write(s) |
|
|||
211 | self.offset += len(s) |
|
|||
212 |
|
||||
213 | def tell(self): |
|
|||
214 | return self.offset |
|
|||
215 |
|
||||
216 | class zipit(object): |
|
198 | class zipit(object): | |
217 | '''write archive to zip file or stream. can write uncompressed, |
|
199 | '''write archive to zip file or stream. can write uncompressed, | |
218 | or compressed with deflate.''' |
|
200 | or compressed with deflate.''' | |
219 |
|
201 | |||
220 | def __init__(self, dest, mtime, compress=True): |
|
202 | def __init__(self, dest, mtime, compress=True): | |
221 | if not isinstance(dest, bytes): |
|
|||
222 | try: |
|
|||
223 | dest.tell() |
|
|||
224 | except (AttributeError, IOError): |
|
|||
225 | dest = tellable(dest) |
|
|||
226 | self.z = zipfile.ZipFile(pycompat.fsdecode(dest), r'w', |
|
203 | self.z = zipfile.ZipFile(pycompat.fsdecode(dest), r'w', | |
227 | compress and zipfile.ZIP_DEFLATED or |
|
204 | compress and zipfile.ZIP_DEFLATED or | |
228 | zipfile.ZIP_STORED) |
|
205 | zipfile.ZIP_STORED) | |
229 |
|
206 | |||
230 | # Python's zipfile module emits deprecation warnings if we try |
|
207 | # Python's zipfile module emits deprecation warnings if we try | |
231 | # to store files with a date before 1980. |
|
208 | # to store files with a date before 1980. | |
232 | epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) |
|
209 | epoch = 315532800 # calendar.timegm((1980, 1, 1, 0, 0, 0, 1, 1, 0)) | |
233 | if mtime < epoch: |
|
210 | if mtime < epoch: | |
234 | mtime = epoch |
|
211 | mtime = epoch | |
235 |
|
212 | |||
236 | self.mtime = mtime |
|
213 | self.mtime = mtime | |
237 | self.date_time = time.gmtime(mtime)[:6] |
|
214 | self.date_time = time.gmtime(mtime)[:6] | |
238 |
|
215 | |||
239 | def addfile(self, name, mode, islink, data): |
|
216 | def addfile(self, name, mode, islink, data): | |
240 | i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time) |
|
217 | i = zipfile.ZipInfo(pycompat.fsdecode(name), self.date_time) | |
241 | i.compress_type = self.z.compression |
|
218 | i.compress_type = self.z.compression | |
242 | # unzip will not honor unix file modes unless file creator is |
|
219 | # unzip will not honor unix file modes unless file creator is | |
243 | # set to unix (id 3). |
|
220 | # set to unix (id 3). | |
244 | i.create_system = 3 |
|
221 | i.create_system = 3 | |
245 | ftype = _UNX_IFREG |
|
222 | ftype = _UNX_IFREG | |
246 | if islink: |
|
223 | if islink: | |
247 | mode = 0o777 |
|
224 | mode = 0o777 | |
248 | ftype = _UNX_IFLNK |
|
225 | ftype = _UNX_IFLNK | |
249 | i.external_attr = (mode | ftype) << 16 |
|
226 | i.external_attr = (mode | ftype) << 16 | |
250 | # add "extended-timestamp" extra block, because zip archives |
|
227 | # add "extended-timestamp" extra block, because zip archives | |
251 | # without this will be extracted with unexpected timestamp, |
|
228 | # without this will be extracted with unexpected timestamp, | |
252 | # if TZ is not configured as GMT |
|
229 | # if TZ is not configured as GMT | |
253 | i.extra += struct.pack('<hhBl', |
|
230 | i.extra += struct.pack('<hhBl', | |
254 | 0x5455, # block type: "extended-timestamp" |
|
231 | 0x5455, # block type: "extended-timestamp" | |
255 | 1 + 4, # size of this block |
|
232 | 1 + 4, # size of this block | |
256 | 1, # "modification time is present" |
|
233 | 1, # "modification time is present" | |
257 | int(self.mtime)) # last modification (UTC) |
|
234 | int(self.mtime)) # last modification (UTC) | |
258 | self.z.writestr(i, data) |
|
235 | self.z.writestr(i, data) | |
259 |
|
236 | |||
260 | def done(self): |
|
237 | def done(self): | |
261 | self.z.close() |
|
238 | self.z.close() | |
262 |
|
239 | |||
263 | class fileit(object): |
|
240 | class fileit(object): | |
264 | '''write archive as files in directory.''' |
|
241 | '''write archive as files in directory.''' | |
265 |
|
242 | |||
266 | def __init__(self, name, mtime): |
|
243 | def __init__(self, name, mtime): | |
267 | self.basedir = name |
|
244 | self.basedir = name | |
268 | self.opener = vfsmod.vfs(self.basedir) |
|
245 | self.opener = vfsmod.vfs(self.basedir) | |
269 | self.mtime = mtime |
|
246 | self.mtime = mtime | |
270 |
|
247 | |||
271 | def addfile(self, name, mode, islink, data): |
|
248 | def addfile(self, name, mode, islink, data): | |
272 | if islink: |
|
249 | if islink: | |
273 | self.opener.symlink(data, name) |
|
250 | self.opener.symlink(data, name) | |
274 | return |
|
251 | return | |
275 | f = self.opener(name, "w", atomictemp=False) |
|
252 | f = self.opener(name, "w", atomictemp=False) | |
276 | f.write(data) |
|
253 | f.write(data) | |
277 | f.close() |
|
254 | f.close() | |
278 | destfile = os.path.join(self.basedir, name) |
|
255 | destfile = os.path.join(self.basedir, name) | |
279 | os.chmod(destfile, mode) |
|
256 | os.chmod(destfile, mode) | |
280 | if self.mtime is not None: |
|
257 | if self.mtime is not None: | |
281 | os.utime(destfile, (self.mtime, self.mtime)) |
|
258 | os.utime(destfile, (self.mtime, self.mtime)) | |
282 |
|
259 | |||
283 | def done(self): |
|
260 | def done(self): | |
284 | pass |
|
261 | pass | |
285 |
|
262 | |||
286 | archivers = { |
|
263 | archivers = { | |
287 | 'files': fileit, |
|
264 | 'files': fileit, | |
288 | 'tar': tarit, |
|
265 | 'tar': tarit, | |
289 | 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), |
|
266 | 'tbz2': lambda name, mtime: tarit(name, mtime, 'bz2'), | |
290 | 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), |
|
267 | 'tgz': lambda name, mtime: tarit(name, mtime, 'gz'), | |
291 | 'uzip': lambda name, mtime: zipit(name, mtime, False), |
|
268 | 'uzip': lambda name, mtime: zipit(name, mtime, False), | |
292 | 'zip': zipit, |
|
269 | 'zip': zipit, | |
293 | } |
|
270 | } | |
294 |
|
271 | |||
295 | def archive(repo, dest, node, kind, decode=True, matchfn=None, |
|
272 | def archive(repo, dest, node, kind, decode=True, matchfn=None, | |
296 | prefix='', mtime=None, subrepos=False): |
|
273 | prefix='', mtime=None, subrepos=False): | |
297 | '''create archive of repo as it was at node. |
|
274 | '''create archive of repo as it was at node. | |
298 |
|
275 | |||
299 | dest can be name of directory, name of archive file, or file |
|
276 | dest can be name of directory, name of archive file, or file | |
300 | object to write archive to. |
|
277 | object to write archive to. | |
301 |
|
278 | |||
302 | kind is type of archive to create. |
|
279 | kind is type of archive to create. | |
303 |
|
280 | |||
304 | decode tells whether to put files through decode filters from |
|
281 | decode tells whether to put files through decode filters from | |
305 | hgrc. |
|
282 | hgrc. | |
306 |
|
283 | |||
307 | matchfn is function to filter names of files to write to archive. |
|
284 | matchfn is function to filter names of files to write to archive. | |
308 |
|
285 | |||
309 | prefix is name of path to put before every archive member. |
|
286 | prefix is name of path to put before every archive member. | |
310 |
|
287 | |||
311 | mtime is the modified time, in seconds, or None to use the changeset time. |
|
288 | mtime is the modified time, in seconds, or None to use the changeset time. | |
312 |
|
289 | |||
313 | subrepos tells whether to include subrepos. |
|
290 | subrepos tells whether to include subrepos. | |
314 | ''' |
|
291 | ''' | |
315 |
|
292 | |||
316 | if kind == 'files': |
|
293 | if kind == 'files': | |
317 | if prefix: |
|
294 | if prefix: | |
318 | raise error.Abort(_('cannot give prefix when archiving to files')) |
|
295 | raise error.Abort(_('cannot give prefix when archiving to files')) | |
319 | else: |
|
296 | else: | |
320 | prefix = tidyprefix(dest, kind, prefix) |
|
297 | prefix = tidyprefix(dest, kind, prefix) | |
321 |
|
298 | |||
322 | def write(name, mode, islink, getdata): |
|
299 | def write(name, mode, islink, getdata): | |
323 | data = getdata() |
|
300 | data = getdata() | |
324 | if decode: |
|
301 | if decode: | |
325 | data = repo.wwritedata(name, data) |
|
302 | data = repo.wwritedata(name, data) | |
326 | archiver.addfile(prefix + name, mode, islink, data) |
|
303 | archiver.addfile(prefix + name, mode, islink, data) | |
327 |
|
304 | |||
328 | if kind not in archivers: |
|
305 | if kind not in archivers: | |
329 | raise error.Abort(_("unknown archive type '%s'") % kind) |
|
306 | raise error.Abort(_("unknown archive type '%s'") % kind) | |
330 |
|
307 | |||
331 | ctx = repo[node] |
|
308 | ctx = repo[node] | |
332 | archiver = archivers[kind](dest, mtime or ctx.date()[0]) |
|
309 | archiver = archivers[kind](dest, mtime or ctx.date()[0]) | |
333 |
|
310 | |||
334 | if repo.ui.configbool("ui", "archivemeta"): |
|
311 | if repo.ui.configbool("ui", "archivemeta"): | |
335 | name = '.hg_archival.txt' |
|
312 | name = '.hg_archival.txt' | |
336 | if not matchfn or matchfn(name): |
|
313 | if not matchfn or matchfn(name): | |
337 | write(name, 0o644, False, lambda: buildmetadata(ctx)) |
|
314 | write(name, 0o644, False, lambda: buildmetadata(ctx)) | |
338 |
|
315 | |||
339 | if matchfn: |
|
316 | if matchfn: | |
340 | files = [f for f in ctx.manifest().keys() if matchfn(f)] |
|
317 | files = [f for f in ctx.manifest().keys() if matchfn(f)] | |
341 | else: |
|
318 | else: | |
342 | files = ctx.manifest().keys() |
|
319 | files = ctx.manifest().keys() | |
343 | total = len(files) |
|
320 | total = len(files) | |
344 | if total: |
|
321 | if total: | |
345 | files.sort() |
|
322 | files.sort() | |
346 | scmutil.fileprefetchhooks(repo, ctx, files) |
|
323 | scmutil.fileprefetchhooks(repo, ctx, files) | |
347 | repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total) |
|
324 | repo.ui.progress(_('archiving'), 0, unit=_('files'), total=total) | |
348 | for i, f in enumerate(files): |
|
325 | for i, f in enumerate(files): | |
349 | ff = ctx.flags(f) |
|
326 | ff = ctx.flags(f) | |
350 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data) |
|
327 | write(f, 'x' in ff and 0o755 or 0o644, 'l' in ff, ctx[f].data) | |
351 | repo.ui.progress(_('archiving'), i + 1, item=f, |
|
328 | repo.ui.progress(_('archiving'), i + 1, item=f, | |
352 | unit=_('files'), total=total) |
|
329 | unit=_('files'), total=total) | |
353 | repo.ui.progress(_('archiving'), None) |
|
330 | repo.ui.progress(_('archiving'), None) | |
354 |
|
331 | |||
355 | if subrepos: |
|
332 | if subrepos: | |
356 | for subpath in sorted(ctx.substate): |
|
333 | for subpath in sorted(ctx.substate): | |
357 | sub = ctx.workingsub(subpath) |
|
334 | sub = ctx.workingsub(subpath) | |
358 | submatch = matchmod.subdirmatcher(subpath, matchfn) |
|
335 | submatch = matchmod.subdirmatcher(subpath, matchfn) | |
359 | total += sub.archive(archiver, prefix, submatch, decode) |
|
336 | total += sub.archive(archiver, prefix, submatch, decode) | |
360 |
|
337 | |||
361 | if total == 0: |
|
338 | if total == 0: | |
362 | raise error.Abort(_('no files match the archive pattern')) |
|
339 | raise error.Abort(_('no files match the archive pattern')) | |
363 |
|
340 | |||
364 | archiver.done() |
|
341 | archiver.done() | |
365 | return total |
|
342 | return total |
@@ -1,550 +1,581 | |||||
1 | # hgweb/request.py - An http request from either CGI or the standalone server. |
|
1 | # hgweb/request.py - An http request from either CGI or the standalone server. | |
2 | # |
|
2 | # | |
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
4 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
4 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> | |
5 | # |
|
5 | # | |
6 | # This software may be used and distributed according to the terms of the |
|
6 | # This software may be used and distributed according to the terms of the | |
7 | # GNU General Public License version 2 or any later version. |
|
7 | # GNU General Public License version 2 or any later version. | |
8 |
|
8 | |||
9 | from __future__ import absolute_import |
|
9 | from __future__ import absolute_import | |
10 |
|
10 | |||
11 | import errno |
|
11 | import errno | |
12 | import socket |
|
12 | import socket | |
13 | import wsgiref.headers as wsgiheaders |
|
13 | import wsgiref.headers as wsgiheaders | |
14 | #import wsgiref.validate |
|
14 | #import wsgiref.validate | |
15 |
|
15 | |||
16 | from .common import ( |
|
16 | from .common import ( | |
17 | ErrorResponse, |
|
17 | ErrorResponse, | |
18 | HTTP_NOT_MODIFIED, |
|
18 | HTTP_NOT_MODIFIED, | |
19 | statusmessage, |
|
19 | statusmessage, | |
20 | ) |
|
20 | ) | |
21 |
|
21 | |||
22 | from ..thirdparty import ( |
|
22 | from ..thirdparty import ( | |
23 | attr, |
|
23 | attr, | |
24 | ) |
|
24 | ) | |
25 | from .. import ( |
|
25 | from .. import ( | |
26 | error, |
|
26 | error, | |
27 | pycompat, |
|
27 | pycompat, | |
28 | util, |
|
28 | util, | |
29 | ) |
|
29 | ) | |
30 |
|
30 | |||
31 | class multidict(object): |
|
31 | class multidict(object): | |
32 | """A dict like object that can store multiple values for a key. |
|
32 | """A dict like object that can store multiple values for a key. | |
33 |
|
33 | |||
34 | Used to store parsed request parameters. |
|
34 | Used to store parsed request parameters. | |
35 |
|
35 | |||
36 | This is inspired by WebOb's class of the same name. |
|
36 | This is inspired by WebOb's class of the same name. | |
37 | """ |
|
37 | """ | |
38 | def __init__(self): |
|
38 | def __init__(self): | |
39 | # Stores (key, value) 2-tuples. This isn't the most efficient. But we |
|
39 | # Stores (key, value) 2-tuples. This isn't the most efficient. But we | |
40 | # don't rely on parameters that much, so it shouldn't be a perf issue. |
|
40 | # don't rely on parameters that much, so it shouldn't be a perf issue. | |
41 | # we can always add dict for fast lookups. |
|
41 | # we can always add dict for fast lookups. | |
42 | self._items = [] |
|
42 | self._items = [] | |
43 |
|
43 | |||
44 | def __getitem__(self, key): |
|
44 | def __getitem__(self, key): | |
45 | """Returns the last set value for a key.""" |
|
45 | """Returns the last set value for a key.""" | |
46 | for k, v in reversed(self._items): |
|
46 | for k, v in reversed(self._items): | |
47 | if k == key: |
|
47 | if k == key: | |
48 | return v |
|
48 | return v | |
49 |
|
49 | |||
50 | raise KeyError(key) |
|
50 | raise KeyError(key) | |
51 |
|
51 | |||
52 | def __setitem__(self, key, value): |
|
52 | def __setitem__(self, key, value): | |
53 | """Replace a values for a key with a new value.""" |
|
53 | """Replace a values for a key with a new value.""" | |
54 | try: |
|
54 | try: | |
55 | del self[key] |
|
55 | del self[key] | |
56 | except KeyError: |
|
56 | except KeyError: | |
57 | pass |
|
57 | pass | |
58 |
|
58 | |||
59 | self._items.append((key, value)) |
|
59 | self._items.append((key, value)) | |
60 |
|
60 | |||
61 | def __delitem__(self, key): |
|
61 | def __delitem__(self, key): | |
62 | """Delete all values for a key.""" |
|
62 | """Delete all values for a key.""" | |
63 | oldlen = len(self._items) |
|
63 | oldlen = len(self._items) | |
64 |
|
64 | |||
65 | self._items[:] = [(k, v) for k, v in self._items if k != key] |
|
65 | self._items[:] = [(k, v) for k, v in self._items if k != key] | |
66 |
|
66 | |||
67 | if oldlen == len(self._items): |
|
67 | if oldlen == len(self._items): | |
68 | raise KeyError(key) |
|
68 | raise KeyError(key) | |
69 |
|
69 | |||
70 | def __contains__(self, key): |
|
70 | def __contains__(self, key): | |
71 | return any(k == key for k, v in self._items) |
|
71 | return any(k == key for k, v in self._items) | |
72 |
|
72 | |||
73 | def __len__(self): |
|
73 | def __len__(self): | |
74 | return len(self._items) |
|
74 | return len(self._items) | |
75 |
|
75 | |||
76 | def get(self, key, default=None): |
|
76 | def get(self, key, default=None): | |
77 | try: |
|
77 | try: | |
78 | return self.__getitem__(key) |
|
78 | return self.__getitem__(key) | |
79 | except KeyError: |
|
79 | except KeyError: | |
80 | return default |
|
80 | return default | |
81 |
|
81 | |||
82 | def add(self, key, value): |
|
82 | def add(self, key, value): | |
83 | """Add a new value for a key. Does not replace existing values.""" |
|
83 | """Add a new value for a key. Does not replace existing values.""" | |
84 | self._items.append((key, value)) |
|
84 | self._items.append((key, value)) | |
85 |
|
85 | |||
86 | def getall(self, key): |
|
86 | def getall(self, key): | |
87 | """Obtains all values for a key.""" |
|
87 | """Obtains all values for a key.""" | |
88 | return [v for k, v in self._items if k == key] |
|
88 | return [v for k, v in self._items if k == key] | |
89 |
|
89 | |||
90 | def getone(self, key): |
|
90 | def getone(self, key): | |
91 | """Obtain a single value for a key. |
|
91 | """Obtain a single value for a key. | |
92 |
|
92 | |||
93 | Raises KeyError if key not defined or it has multiple values set. |
|
93 | Raises KeyError if key not defined or it has multiple values set. | |
94 | """ |
|
94 | """ | |
95 | vals = self.getall(key) |
|
95 | vals = self.getall(key) | |
96 |
|
96 | |||
97 | if not vals: |
|
97 | if not vals: | |
98 | raise KeyError(key) |
|
98 | raise KeyError(key) | |
99 |
|
99 | |||
100 | if len(vals) > 1: |
|
100 | if len(vals) > 1: | |
101 | raise KeyError('multiple values for %r' % key) |
|
101 | raise KeyError('multiple values for %r' % key) | |
102 |
|
102 | |||
103 | return vals[0] |
|
103 | return vals[0] | |
104 |
|
104 | |||
105 | def asdictoflists(self): |
|
105 | def asdictoflists(self): | |
106 | d = {} |
|
106 | d = {} | |
107 | for k, v in self._items: |
|
107 | for k, v in self._items: | |
108 | if k in d: |
|
108 | if k in d: | |
109 | d[k].append(v) |
|
109 | d[k].append(v) | |
110 | else: |
|
110 | else: | |
111 | d[k] = [v] |
|
111 | d[k] = [v] | |
112 |
|
112 | |||
113 | return d |
|
113 | return d | |
114 |
|
114 | |||
@attr.s(frozen=True)
class parsedrequest(object):
    """Represents a parsed WSGI request.

    Contains both parsed parameters as well as a handle on the input stream.

    Instances are immutable (``frozen=True``) and are produced by
    ``parserequestfromenv()``; field order below defines the positional
    constructor signature and must not be reordered.
    """

    # HTTP request method (e.g. ``GET`` or ``POST``), from REQUEST_METHOD.
    method = attr.ib()
    # Full URL for this request.
    url = attr.ib()
    # URL without any path components. Just <proto>://<host><port>.
    baseurl = attr.ib()
    # Advertised URL. Like ``url`` and ``baseurl`` but uses SERVER_NAME instead
    # of HTTP: Host header for hostname. This is likely what clients used.
    advertisedurl = attr.ib()
    # Advertised URL without any path components.
    advertisedbaseurl = attr.ib()
    # URL scheme (part before ``://``). e.g. ``http`` or ``https``.
    urlscheme = attr.ib()
    # Value of REMOTE_USER, if set, or None.
    remoteuser = attr.ib()
    # Value of REMOTE_HOST, if set, or None.
    remotehost = attr.ib()
    # WSGI application path (SCRIPT_NAME, possibly extended by REPO_NAME).
    apppath = attr.ib()
    # List of path parts to be used for dispatch.
    dispatchparts = attr.ib()
    # URL path component (no query string) used for dispatch.
    dispatchpath = attr.ib()
    # Whether there is a path component to this request. This can be true
    # when ``dispatchpath`` is empty due to REPO_NAME muckery.
    havepathinfo = attr.ib()
    # The name of the repository being accessed (REPO_NAME), or None.
    reponame = attr.ib()
    # Raw query string (part after "?" in URL).
    querystring = attr.ib()
    # multidict of query string parameters, preserving order and duplicates.
    qsparams = attr.ib()
    # wsgiref.headers.Headers instance. Operates like a dict with case
    # insensitive keys.
    headers = attr.ib()
    # Request body input stream (file-like object).
    bodyfh = attr.ib()
158 |
|
158 | |||
def parserequestfromenv(env, bodyfh):
    """Parse URL components from environment variables.

    WSGI defines request attributes via environment variables. This function
    parses the environment variables into a data structure.

    ``env`` is a WSGI environment dict. ``bodyfh`` is the file object exposed
    as the request body stream on the returned ``parsedrequest``.
    """
    # PEP-0333 defines the WSGI spec and is a useful reference for this code.

    # We first validate that the incoming object conforms with the WSGI spec.
    # We only want to be dealing with spec-conforming WSGI implementations.
    # TODO enable this once we fix internal violations.
    #wsgiref.validate.check_environ(env)

    # PEP-0333 states that environment keys and values are native strings
    # (bytes on Python 2 and str on Python 3). The code points for the Unicode
    # strings on Python 3 must be between \00000-\000FF. We deal with bytes
    # in Mercurial, so mass convert string keys and values to bytes.
    if pycompat.ispy3:
        # NOTE(review): dict has no iteritems() on Python 3; presumably
        # Mercurial's py3 source loader rewrites this call -- confirm.
        env = {k.encode('latin-1'): v for k, v in env.iteritems()}
        env = {k: v.encode('latin-1') if isinstance(v, str) else v
               for k, v in env.iteritems()}

    # https://www.python.org/dev/peps/pep-0333/#environ-variables defines
    # the environment variables.
    # https://www.python.org/dev/peps/pep-0333/#url-reconstruction defines
    # how URLs are reconstructed.
    fullurl = env['wsgi.url_scheme'] + '://'
    advertisedfullurl = fullurl

    def addport(s):
        # Only emit an explicit port when it differs from the scheme's
        # default (443 for https, 80 for everything else).
        if env['wsgi.url_scheme'] == 'https':
            if env['SERVER_PORT'] != '443':
                s += ':' + env['SERVER_PORT']
        else:
            if env['SERVER_PORT'] != '80':
                s += ':' + env['SERVER_PORT']

        return s

    if env.get('HTTP_HOST'):
        # The Host header already carries any non-default port.
        fullurl += env['HTTP_HOST']
    else:
        fullurl += env['SERVER_NAME']
        fullurl = addport(fullurl)

    advertisedfullurl += env['SERVER_NAME']
    advertisedfullurl = addport(advertisedfullurl)

    baseurl = fullurl
    advertisedbaseurl = advertisedfullurl

    fullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
    advertisedfullurl += util.urlreq.quote(env.get('SCRIPT_NAME', ''))
    fullurl += util.urlreq.quote(env.get('PATH_INFO', ''))
    advertisedfullurl += util.urlreq.quote(env.get('PATH_INFO', ''))

    if env.get('QUERY_STRING'):
        fullurl += '?' + env['QUERY_STRING']
        advertisedfullurl += '?' + env['QUERY_STRING']

    # When dispatching requests, we look at the URL components (PATH_INFO
    # and QUERY_STRING) after the application root (SCRIPT_NAME). But hgwebdir
    # has the concept of "virtual" repositories. This is defined via REPO_NAME.
    # If REPO_NAME is defined, we append it to SCRIPT_NAME to form a new app
    # root. We also exclude its path components from PATH_INFO when resolving
    # the dispatch path.

    # PEP-3333 allows SCRIPT_NAME to be absent when it would be the empty
    # string, so don't assume the key exists (the URL reconstruction above
    # already uses .get()); previously this raised KeyError in that case.
    apppath = env.get('SCRIPT_NAME', '')

    if env.get('REPO_NAME'):
        if not apppath.endswith('/'):
            apppath += '/'

        apppath += env.get('REPO_NAME')

    if 'PATH_INFO' in env:
        dispatchparts = env['PATH_INFO'].strip('/').split('/')

        # Strip out repo parts.
        repoparts = env.get('REPO_NAME', '').split('/')
        if dispatchparts[:len(repoparts)] == repoparts:
            dispatchparts = dispatchparts[len(repoparts):]
    else:
        dispatchparts = []

    dispatchpath = '/'.join(dispatchparts)

    querystring = env.get('QUERY_STRING', '')

    # We store as a list so we have ordering information. We also store as
    # a dict to facilitate fast lookup.
    qsparams = multidict()
    for k, v in util.urlreq.parseqsl(querystring, keep_blank_values=True):
        qsparams.add(k, v)

    # HTTP_* keys contain HTTP request headers. The Headers structure should
    # perform case normalization for us. We just rewrite underscore to dash
    # so keys match what likely went over the wire.
    headers = []
    for k, v in env.iteritems():
        if k.startswith('HTTP_'):
            headers.append((k[len('HTTP_'):].replace('_', '-'), v))

    headers = wsgiheaders.Headers(headers)

    # This is kind of a lie because the HTTP header wasn't explicitly
    # sent. But for all intents and purposes it should be OK to lie about
    # this, since a consumer will use either value to determine how many
    # bytes are available to read.
    if 'CONTENT_LENGTH' in env and 'HTTP_CONTENT_LENGTH' not in env:
        headers['Content-Length'] = env['CONTENT_LENGTH']

    # TODO do this once we remove wsgirequest.inp, otherwise we could have
    # multiple readers from the underlying input stream.
    #bodyfh = env['wsgi.input']
    #if 'Content-Length' in headers:
    #    bodyfh = util.cappedreader(bodyfh, int(headers['Content-Length']))

    return parsedrequest(method=env['REQUEST_METHOD'],
                         url=fullurl, baseurl=baseurl,
                         advertisedurl=advertisedfullurl,
                         advertisedbaseurl=advertisedbaseurl,
                         urlscheme=env['wsgi.url_scheme'],
                         remoteuser=env.get('REMOTE_USER'),
                         remotehost=env.get('REMOTE_HOST'),
                         apppath=apppath,
                         dispatchparts=dispatchparts, dispatchpath=dispatchpath,
                         havepathinfo='PATH_INFO' in env,
                         reponame=env.get('REPO_NAME'),
                         querystring=querystring,
                         qsparams=qsparams,
                         headers=headers,
                         bodyfh=bodyfh)
292 |
|
292 | |||
|
class offsettrackingwriter(object):
    """An append-only, file-like wrapper that counts bytes written.

    Each instance is bound to a callable; every ``write()`` forwards its
    data to that callable. The running total of written bytes is kept so
    ``tell()`` can be answered.

    This exists mainly to wrap the ``write()`` callable handed back by a
    WSGI ``start_response()`` function: that callable is not a file
    object, so no other file-object methods are provided here.
    """
    def __init__(self, writefn):
        self._writefn = writefn
        self._pos = 0

    def write(self, s):
        count = self._writefn(s)
        # A writer that reports no byte count is assumed to have written
        # everything it was handed.
        self._pos += len(s) if count is None else count

    def flush(self):
        # Nothing is buffered locally, so flushing is a no-op.
        pass

    def tell(self):
        return self._pos
|
323 | ||||
class wsgiresponse(object):
    """Represents a response to a WSGI request.

    A response consists of a status line, headers, and a body.

    Consumers must populate the ``status`` and ``headers`` fields and
    make a call to a ``setbody*()`` method before the response can be
    issued.

    When it is time to start sending the response over the wire,
    ``sendresponse()`` is called. It handles emitting the header portion
    of the response message. It then yields chunks of body data to be
    written to the peer. Typically, the WSGI application itself calls
    and returns the value from ``sendresponse()``.
    """

    def __init__(self, req, startresponse):
        """Create an empty response tied to a specific request.

        ``req`` is a ``parsedrequest``. ``startresponse`` is the
        ``start_response`` function passed to the WSGI application.
        """
        self._req = req
        self._startresponse = startresponse

        # Status line (e.g. ``200 Script output follows``); must be set
        # by the consumer before ``sendresponse()``.
        self.status = None
        self.headers = wsgiheaders.Headers([])

        # Exactly one of these is populated by the ``setbody*()`` methods.
        self._bodybytes = None
        self._bodygen = None
        self._started = False

    def setbodybytes(self, b):
        """Define the response body as static bytes.

        Also sets the ``Content-Length`` header from the body length.
        Raises ``error.ProgrammingError`` if a body was already defined.
        """
        if self._bodybytes is not None or self._bodygen is not None:
            raise error.ProgrammingError('cannot define body multiple times')

        self._bodybytes = b
        self.headers['Content-Length'] = '%d' % len(b)

    def setbodygen(self, gen):
        """Define the response body as a generator of bytes.

        Raises ``error.ProgrammingError`` if a body was already defined.
        """
        if self._bodybytes is not None or self._bodygen is not None:
            raise error.ProgrammingError('cannot define body multiple times')

        self._bodygen = gen

    def sendresponse(self):
        """Send the generated response to the client.

        Before this is called, ``status`` must be set and one of
        ``setbodybytes()`` or ``setbodygen()`` must be called.

        Calling this method multiple times is not allowed.
        """
        if self._started:
            raise error.ProgrammingError('sendresponse() called multiple times')

        self._started = True

        if not self.status:
            raise error.ProgrammingError('status line not defined')

        if self._bodybytes is None and self._bodygen is None:
            raise error.ProgrammingError('response body not defined')

        # Various HTTP clients (notably httplib) won't read the HTTP response
        # until the HTTP request has been sent in full. If servers (us) send a
        # response before the HTTP request has been fully sent, the connection
        # may deadlock because neither end is reading.
        #
        # We work around this by "draining" the request data before
        # sending any response in some conditions.
        drain = False
        close = False

        # If the client sent Expect: 100-continue, we assume it is smart enough
        # to deal with the server sending a response before reading the request.
        # (httplib doesn't do this.)
        if self._req.headers.get('Expect', '').lower() == '100-continue':
            pass
        # Only tend to request methods that have bodies. Strictly speaking,
        # we should sniff for a body. But this is fine for our existing
        # WSGI applications.
        elif self._req.method not in ('POST', 'PUT'):
            pass
        else:
            # If we don't know how much data to read, there's no guarantee
            # that we can drain the request responsibly. The WSGI
            # specification only says that servers *should* ensure the
            # input stream doesn't overrun the actual request. So there's
            # no guarantee that reading until EOF won't corrupt the stream
            # state.
            if not isinstance(self._req.bodyfh, util.cappedreader):
                close = True
            else:
                # We /could/ only drain certain HTTP response codes. But 200 and
                # non-200 wire protocol responses both require draining. Since
                # we have a capped reader in place for all situations where we
                # drain, it is safe to read from that stream. We'll either do
                # a drain or no-op if we're already at EOF.
                drain = True

        if close:
            self.headers['Connection'] = 'Close'

        if drain:
            assert isinstance(self._req.bodyfh, util.cappedreader)
            while True:
                chunk = self._req.bodyfh.read(32768)
                if not chunk:
                    break

        self._startresponse(pycompat.sysstr(self.status), self.headers.items())

        # Compare against None rather than relying on truthiness so a
        # legitimate empty bytes body is still emitted by the first branch
        # instead of falling through to the error below.
        if self._bodybytes is not None:
            yield self._bodybytes
        elif self._bodygen is not None:
            for chunk in self._bodygen:
                yield chunk
        else:
            # This exception was previously constructed without ``raise``,
            # silently doing nothing; raise so programming errors surface.
            raise error.ProgrammingError('do not know how to send body')
414 |
|
445 | |||
415 | class wsgirequest(object): |
|
446 | class wsgirequest(object): | |
416 | """Higher-level API for a WSGI request. |
|
447 | """Higher-level API for a WSGI request. | |
417 |
|
448 | |||
418 | WSGI applications are invoked with 2 arguments. They are used to |
|
449 | WSGI applications are invoked with 2 arguments. They are used to | |
419 | instantiate instances of this class, which provides higher-level APIs |
|
450 | instantiate instances of this class, which provides higher-level APIs | |
420 | for obtaining request parameters, writing HTTP output, etc. |
|
451 | for obtaining request parameters, writing HTTP output, etc. | |
421 | """ |
|
452 | """ | |
422 | def __init__(self, wsgienv, start_response): |
|
453 | def __init__(self, wsgienv, start_response): | |
423 | version = wsgienv[r'wsgi.version'] |
|
454 | version = wsgienv[r'wsgi.version'] | |
424 | if (version < (1, 0)) or (version >= (2, 0)): |
|
455 | if (version < (1, 0)) or (version >= (2, 0)): | |
425 | raise RuntimeError("Unknown and unsupported WSGI version %d.%d" |
|
456 | raise RuntimeError("Unknown and unsupported WSGI version %d.%d" | |
426 | % version) |
|
457 | % version) | |
427 |
|
458 | |||
428 | inp = wsgienv[r'wsgi.input'] |
|
459 | inp = wsgienv[r'wsgi.input'] | |
429 |
|
460 | |||
430 | if r'HTTP_CONTENT_LENGTH' in wsgienv: |
|
461 | if r'HTTP_CONTENT_LENGTH' in wsgienv: | |
431 | inp = util.cappedreader(inp, int(wsgienv[r'HTTP_CONTENT_LENGTH'])) |
|
462 | inp = util.cappedreader(inp, int(wsgienv[r'HTTP_CONTENT_LENGTH'])) | |
432 | elif r'CONTENT_LENGTH' in wsgienv: |
|
463 | elif r'CONTENT_LENGTH' in wsgienv: | |
433 | inp = util.cappedreader(inp, int(wsgienv[r'CONTENT_LENGTH'])) |
|
464 | inp = util.cappedreader(inp, int(wsgienv[r'CONTENT_LENGTH'])) | |
434 |
|
465 | |||
435 | self.err = wsgienv[r'wsgi.errors'] |
|
466 | self.err = wsgienv[r'wsgi.errors'] | |
436 | self.threaded = wsgienv[r'wsgi.multithread'] |
|
467 | self.threaded = wsgienv[r'wsgi.multithread'] | |
437 | self.multiprocess = wsgienv[r'wsgi.multiprocess'] |
|
468 | self.multiprocess = wsgienv[r'wsgi.multiprocess'] | |
438 | self.run_once = wsgienv[r'wsgi.run_once'] |
|
469 | self.run_once = wsgienv[r'wsgi.run_once'] | |
439 | self.env = wsgienv |
|
470 | self.env = wsgienv | |
440 | self.req = parserequestfromenv(wsgienv, inp) |
|
471 | self.req = parserequestfromenv(wsgienv, inp) | |
441 | self.res = wsgiresponse(self.req, start_response) |
|
472 | self.res = wsgiresponse(self.req, start_response) | |
442 | self._start_response = start_response |
|
473 | self._start_response = start_response | |
443 | self.server_write = None |
|
474 | self.server_write = None | |
444 | self.headers = [] |
|
475 | self.headers = [] | |
445 |
|
476 | |||
446 | def respond(self, status, type, filename=None, body=None): |
|
477 | def respond(self, status, type, filename=None, body=None): | |
447 | if not isinstance(type, str): |
|
478 | if not isinstance(type, str): | |
448 | type = pycompat.sysstr(type) |
|
479 | type = pycompat.sysstr(type) | |
449 | if self._start_response is not None: |
|
480 | if self._start_response is not None: | |
450 | self.headers.append((r'Content-Type', type)) |
|
481 | self.headers.append((r'Content-Type', type)) | |
451 | if filename: |
|
482 | if filename: | |
452 | filename = (filename.rpartition('/')[-1] |
|
483 | filename = (filename.rpartition('/')[-1] | |
453 | .replace('\\', '\\\\').replace('"', '\\"')) |
|
484 | .replace('\\', '\\\\').replace('"', '\\"')) | |
454 | self.headers.append(('Content-Disposition', |
|
485 | self.headers.append(('Content-Disposition', | |
455 | 'inline; filename="%s"' % filename)) |
|
486 | 'inline; filename="%s"' % filename)) | |
456 | if body is not None: |
|
487 | if body is not None: | |
457 | self.headers.append((r'Content-Length', str(len(body)))) |
|
488 | self.headers.append((r'Content-Length', str(len(body)))) | |
458 |
|
489 | |||
459 | for k, v in self.headers: |
|
490 | for k, v in self.headers: | |
460 | if not isinstance(v, str): |
|
491 | if not isinstance(v, str): | |
461 | raise TypeError('header value must be string: %r' % (v,)) |
|
492 | raise TypeError('header value must be string: %r' % (v,)) | |
462 |
|
493 | |||
463 | if isinstance(status, ErrorResponse): |
|
494 | if isinstance(status, ErrorResponse): | |
464 | self.headers.extend(status.headers) |
|
495 | self.headers.extend(status.headers) | |
465 | if status.code == HTTP_NOT_MODIFIED: |
|
496 | if status.code == HTTP_NOT_MODIFIED: | |
466 | # RFC 2616 Section 10.3.5: 304 Not Modified has cases where |
|
497 | # RFC 2616 Section 10.3.5: 304 Not Modified has cases where | |
467 | # it MUST NOT include any headers other than these and no |
|
498 | # it MUST NOT include any headers other than these and no | |
468 | # body |
|
499 | # body | |
469 | self.headers = [(k, v) for (k, v) in self.headers if |
|
500 | self.headers = [(k, v) for (k, v) in self.headers if | |
470 | k in ('Date', 'ETag', 'Expires', |
|
501 | k in ('Date', 'ETag', 'Expires', | |
471 | 'Cache-Control', 'Vary')] |
|
502 | 'Cache-Control', 'Vary')] | |
472 | status = statusmessage(status.code, pycompat.bytestr(status)) |
|
503 | status = statusmessage(status.code, pycompat.bytestr(status)) | |
473 | elif status == 200: |
|
504 | elif status == 200: | |
474 | status = '200 Script output follows' |
|
505 | status = '200 Script output follows' | |
475 | elif isinstance(status, int): |
|
506 | elif isinstance(status, int): | |
476 | status = statusmessage(status) |
|
507 | status = statusmessage(status) | |
477 |
|
508 | |||
478 | # Various HTTP clients (notably httplib) won't read the HTTP |
|
509 | # Various HTTP clients (notably httplib) won't read the HTTP | |
479 | # response until the HTTP request has been sent in full. If servers |
|
510 | # response until the HTTP request has been sent in full. If servers | |
480 | # (us) send a response before the HTTP request has been fully sent, |
|
511 | # (us) send a response before the HTTP request has been fully sent, | |
481 | # the connection may deadlock because neither end is reading. |
|
512 | # the connection may deadlock because neither end is reading. | |
482 | # |
|
513 | # | |
483 | # We work around this by "draining" the request data before |
|
514 | # We work around this by "draining" the request data before | |
484 | # sending any response in some conditions. |
|
515 | # sending any response in some conditions. | |
485 | drain = False |
|
516 | drain = False | |
486 | close = False |
|
517 | close = False | |
487 |
|
518 | |||
488 | # If the client sent Expect: 100-continue, we assume it is smart |
|
519 | # If the client sent Expect: 100-continue, we assume it is smart | |
489 | # enough to deal with the server sending a response before reading |
|
520 | # enough to deal with the server sending a response before reading | |
490 | # the request. (httplib doesn't do this.) |
|
521 | # the request. (httplib doesn't do this.) | |
491 | if self.env.get(r'HTTP_EXPECT', r'').lower() == r'100-continue': |
|
522 | if self.env.get(r'HTTP_EXPECT', r'').lower() == r'100-continue': | |
492 | pass |
|
523 | pass | |
493 | # Only tend to request methods that have bodies. Strictly speaking, |
|
524 | # Only tend to request methods that have bodies. Strictly speaking, | |
494 | # we should sniff for a body. But this is fine for our existing |
|
525 | # we should sniff for a body. But this is fine for our existing | |
495 | # WSGI applications. |
|
526 | # WSGI applications. | |
496 | elif self.env[r'REQUEST_METHOD'] not in (r'POST', r'PUT'): |
|
527 | elif self.env[r'REQUEST_METHOD'] not in (r'POST', r'PUT'): | |
497 | pass |
|
528 | pass | |
498 | else: |
|
529 | else: | |
499 | # If we don't know how much data to read, there's no guarantee |
|
530 | # If we don't know how much data to read, there's no guarantee | |
500 | # that we can drain the request responsibly. The WSGI |
|
531 | # that we can drain the request responsibly. The WSGI | |
501 | # specification only says that servers *should* ensure the |
|
532 | # specification only says that servers *should* ensure the | |
502 | # input stream doesn't overrun the actual request. So there's |
|
533 | # input stream doesn't overrun the actual request. So there's | |
503 | # no guarantee that reading until EOF won't corrupt the stream |
|
534 | # no guarantee that reading until EOF won't corrupt the stream | |
504 | # state. |
|
535 | # state. | |
505 | if not isinstance(self.req.bodyfh, util.cappedreader): |
|
536 | if not isinstance(self.req.bodyfh, util.cappedreader): | |
506 | close = True |
|
537 | close = True | |
507 | else: |
|
538 | else: | |
508 | # We /could/ only drain certain HTTP response codes. But 200 |
|
539 | # We /could/ only drain certain HTTP response codes. But 200 | |
509 | # and non-200 wire protocol responses both require draining. |
|
540 | # and non-200 wire protocol responses both require draining. | |
510 | # Since we have a capped reader in place for all situations |
|
541 | # Since we have a capped reader in place for all situations | |
511 | # where we drain, it is safe to read from that stream. We'll |
|
542 | # where we drain, it is safe to read from that stream. We'll | |
512 | # either do a drain or no-op if we're already at EOF. |
|
543 | # either do a drain or no-op if we're already at EOF. | |
513 | drain = True |
|
544 | drain = True | |
514 |
|
545 | |||
515 | if close: |
|
546 | if close: | |
516 | self.headers.append((r'Connection', r'Close')) |
|
547 | self.headers.append((r'Connection', r'Close')) | |
517 |
|
548 | |||
518 | if drain: |
|
549 | if drain: | |
519 | assert isinstance(self.req.bodyfh, util.cappedreader) |
|
550 | assert isinstance(self.req.bodyfh, util.cappedreader) | |
520 | while True: |
|
551 | while True: | |
521 | chunk = self.req.bodyfh.read(32768) |
|
552 | chunk = self.req.bodyfh.read(32768) | |
522 | if not chunk: |
|
553 | if not chunk: | |
523 | break |
|
554 | break | |
524 |
|
555 | |||
525 | self.server_write = self._start_response( |
|
556 | self.server_write = self._start_response( | |
526 | pycompat.sysstr(status), self.headers) |
|
557 | pycompat.sysstr(status), self.headers) | |
527 | self._start_response = None |
|
558 | self._start_response = None | |
528 | self.headers = [] |
|
559 | self.headers = [] | |
529 | if body is not None: |
|
560 | if body is not None: | |
530 | self.write(body) |
|
561 | self.write(body) | |
531 | self.server_write = None |
|
562 | self.server_write = None | |
532 |
|
563 | |||
533 | def write(self, thing): |
|
564 | def write(self, thing): | |
534 | if thing: |
|
565 | if thing: | |
535 | try: |
|
566 | try: | |
536 | self.server_write(thing) |
|
567 | self.server_write(thing) | |
537 | except socket.error as inst: |
|
568 | except socket.error as inst: | |
538 | if inst[0] != errno.ECONNRESET: |
|
569 | if inst[0] != errno.ECONNRESET: | |
539 | raise |
|
570 | raise | |
540 |
|
571 | |||
541 | def flush(self): |
|
572 | def flush(self): | |
542 | return None |
|
573 | return None | |
543 |
|
574 | |||
544 | def wsgiapplication(app_maker): |
|
575 | def wsgiapplication(app_maker): | |
545 | '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() |
|
576 | '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() | |
546 | can and should now be used as a WSGI application.''' |
|
577 | can and should now be used as a WSGI application.''' | |
547 | application = app_maker() |
|
578 | application = app_maker() | |
548 | def run_wsgi(env, respond): |
|
579 | def run_wsgi(env, respond): | |
549 | return application(env, respond) |
|
580 | return application(env, respond) | |
550 | return run_wsgi |
|
581 | return run_wsgi |
@@ -1,1506 +1,1511 | |||||
1 | # |
|
1 | # | |
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> | |
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> |
|
3 | # Copyright 2005-2007 Matt Mackall <mpm@selenic.com> | |
4 | # |
|
4 | # | |
5 | # This software may be used and distributed according to the terms of the |
|
5 | # This software may be used and distributed according to the terms of the | |
6 | # GNU General Public License version 2 or any later version. |
|
6 | # GNU General Public License version 2 or any later version. | |
7 |
|
7 | |||
8 | from __future__ import absolute_import |
|
8 | from __future__ import absolute_import | |
9 |
|
9 | |||
10 | import copy |
|
10 | import copy | |
11 | import mimetypes |
|
11 | import mimetypes | |
12 | import os |
|
12 | import os | |
13 | import re |
|
13 | import re | |
14 |
|
14 | |||
15 | from ..i18n import _ |
|
15 | from ..i18n import _ | |
16 | from ..node import hex, nullid, short |
|
16 | from ..node import hex, nullid, short | |
17 |
|
17 | |||
18 | from .common import ( |
|
18 | from .common import ( | |
19 | ErrorResponse, |
|
19 | ErrorResponse, | |
20 | HTTP_FORBIDDEN, |
|
20 | HTTP_FORBIDDEN, | |
21 | HTTP_NOT_FOUND, |
|
21 | HTTP_NOT_FOUND, | |
22 | HTTP_OK, |
|
22 | HTTP_OK, | |
23 | get_contact, |
|
23 | get_contact, | |
24 | paritygen, |
|
24 | paritygen, | |
25 | staticfile, |
|
25 | staticfile, | |
26 | ) |
|
26 | ) | |
|
27 | from . import ( | |||
|
28 | request as requestmod, | |||
|
29 | ) | |||
27 |
|
30 | |||
28 | from .. import ( |
|
31 | from .. import ( | |
29 | archival, |
|
32 | archival, | |
30 | dagop, |
|
33 | dagop, | |
31 | encoding, |
|
34 | encoding, | |
32 | error, |
|
35 | error, | |
33 | graphmod, |
|
36 | graphmod, | |
34 | pycompat, |
|
37 | pycompat, | |
35 | revset, |
|
38 | revset, | |
36 | revsetlang, |
|
39 | revsetlang, | |
37 | scmutil, |
|
40 | scmutil, | |
38 | smartset, |
|
41 | smartset, | |
39 | templater, |
|
42 | templater, | |
40 | util, |
|
43 | util, | |
41 | ) |
|
44 | ) | |
42 |
|
45 | |||
43 | from . import ( |
|
46 | from . import ( | |
44 | webutil, |
|
47 | webutil, | |
45 | ) |
|
48 | ) | |
46 |
|
49 | |||
47 | __all__ = [] |
|
50 | __all__ = [] | |
48 | commands = {} |
|
51 | commands = {} | |
49 |
|
52 | |||
50 | class webcommand(object): |
|
53 | class webcommand(object): | |
51 | """Decorator used to register a web command handler. |
|
54 | """Decorator used to register a web command handler. | |
52 |
|
55 | |||
53 | The decorator takes as its positional arguments the name/path the |
|
56 | The decorator takes as its positional arguments the name/path the | |
54 | command should be accessible under. |
|
57 | command should be accessible under. | |
55 |
|
58 | |||
56 | When called, functions receive as arguments a ``requestcontext``, |
|
59 | When called, functions receive as arguments a ``requestcontext``, | |
57 | ``wsgirequest``, and a templater instance for generatoring output. |
|
60 | ``wsgirequest``, and a templater instance for generatoring output. | |
58 | The functions should populate the ``rctx.res`` object with details |
|
61 | The functions should populate the ``rctx.res`` object with details | |
59 | about the HTTP response. |
|
62 | about the HTTP response. | |
60 |
|
63 | |||
61 | The function can return the ``requestcontext.res`` instance to signal |
|
64 | The function can return the ``requestcontext.res`` instance to signal | |
62 | that it wants to use this object to generate the response. If an iterable |
|
65 | that it wants to use this object to generate the response. If an iterable | |
63 | is returned, the ``wsgirequest`` instance will be used and the returned |
|
66 | is returned, the ``wsgirequest`` instance will be used and the returned | |
64 | content will constitute the response body. |
|
67 | content will constitute the response body. | |
65 |
|
68 | |||
66 | Usage: |
|
69 | Usage: | |
67 |
|
70 | |||
68 | @webcommand('mycommand') |
|
71 | @webcommand('mycommand') | |
69 | def mycommand(web, req, tmpl): |
|
72 | def mycommand(web, req, tmpl): | |
70 | pass |
|
73 | pass | |
71 | """ |
|
74 | """ | |
72 |
|
75 | |||
73 | def __init__(self, name): |
|
76 | def __init__(self, name): | |
74 | self.name = name |
|
77 | self.name = name | |
75 |
|
78 | |||
76 | def __call__(self, func): |
|
79 | def __call__(self, func): | |
77 | __all__.append(self.name) |
|
80 | __all__.append(self.name) | |
78 | commands[self.name] = func |
|
81 | commands[self.name] = func | |
79 | return func |
|
82 | return func | |
80 |
|
83 | |||
81 | @webcommand('log') |
|
84 | @webcommand('log') | |
82 | def log(web, req, tmpl): |
|
85 | def log(web, req, tmpl): | |
83 | """ |
|
86 | """ | |
84 | /log[/{revision}[/{path}]] |
|
87 | /log[/{revision}[/{path}]] | |
85 | -------------------------- |
|
88 | -------------------------- | |
86 |
|
89 | |||
87 | Show repository or file history. |
|
90 | Show repository or file history. | |
88 |
|
91 | |||
89 | For URLs of the form ``/log/{revision}``, a list of changesets starting at |
|
92 | For URLs of the form ``/log/{revision}``, a list of changesets starting at | |
90 | the specified changeset identifier is shown. If ``{revision}`` is not |
|
93 | the specified changeset identifier is shown. If ``{revision}`` is not | |
91 | defined, the default is ``tip``. This form is equivalent to the |
|
94 | defined, the default is ``tip``. This form is equivalent to the | |
92 | ``changelog`` handler. |
|
95 | ``changelog`` handler. | |
93 |
|
96 | |||
94 | For URLs of the form ``/log/{revision}/{file}``, the history for a specific |
|
97 | For URLs of the form ``/log/{revision}/{file}``, the history for a specific | |
95 | file will be shown. This form is equivalent to the ``filelog`` handler. |
|
98 | file will be shown. This form is equivalent to the ``filelog`` handler. | |
96 | """ |
|
99 | """ | |
97 |
|
100 | |||
98 | if req.req.qsparams.get('file'): |
|
101 | if req.req.qsparams.get('file'): | |
99 | return filelog(web, req, tmpl) |
|
102 | return filelog(web, req, tmpl) | |
100 | else: |
|
103 | else: | |
101 | return changelog(web, req, tmpl) |
|
104 | return changelog(web, req, tmpl) | |
102 |
|
105 | |||
103 | @webcommand('rawfile') |
|
106 | @webcommand('rawfile') | |
104 | def rawfile(web, req, tmpl): |
|
107 | def rawfile(web, req, tmpl): | |
105 | guessmime = web.configbool('web', 'guessmime') |
|
108 | guessmime = web.configbool('web', 'guessmime') | |
106 |
|
109 | |||
107 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) |
|
110 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) | |
108 | if not path: |
|
111 | if not path: | |
109 | return manifest(web, req, tmpl) |
|
112 | return manifest(web, req, tmpl) | |
110 |
|
113 | |||
111 | try: |
|
114 | try: | |
112 | fctx = webutil.filectx(web.repo, req) |
|
115 | fctx = webutil.filectx(web.repo, req) | |
113 | except error.LookupError as inst: |
|
116 | except error.LookupError as inst: | |
114 | try: |
|
117 | try: | |
115 | return manifest(web, req, tmpl) |
|
118 | return manifest(web, req, tmpl) | |
116 | except ErrorResponse: |
|
119 | except ErrorResponse: | |
117 | raise inst |
|
120 | raise inst | |
118 |
|
121 | |||
119 | path = fctx.path() |
|
122 | path = fctx.path() | |
120 | text = fctx.data() |
|
123 | text = fctx.data() | |
121 | mt = 'application/binary' |
|
124 | mt = 'application/binary' | |
122 | if guessmime: |
|
125 | if guessmime: | |
123 | mt = mimetypes.guess_type(path)[0] |
|
126 | mt = mimetypes.guess_type(path)[0] | |
124 | if mt is None: |
|
127 | if mt is None: | |
125 | if util.binary(text): |
|
128 | if util.binary(text): | |
126 | mt = 'application/binary' |
|
129 | mt = 'application/binary' | |
127 | else: |
|
130 | else: | |
128 | mt = 'text/plain' |
|
131 | mt = 'text/plain' | |
129 | if mt.startswith('text/'): |
|
132 | if mt.startswith('text/'): | |
130 | mt += '; charset="%s"' % encoding.encoding |
|
133 | mt += '; charset="%s"' % encoding.encoding | |
131 |
|
134 | |||
132 | web.res.headers['Content-Type'] = mt |
|
135 | web.res.headers['Content-Type'] = mt | |
133 | filename = (path.rpartition('/')[-1] |
|
136 | filename = (path.rpartition('/')[-1] | |
134 | .replace('\\', '\\\\').replace('"', '\\"')) |
|
137 | .replace('\\', '\\\\').replace('"', '\\"')) | |
135 | web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename |
|
138 | web.res.headers['Content-Disposition'] = 'inline; filename="%s"' % filename | |
136 | web.res.setbodybytes(text) |
|
139 | web.res.setbodybytes(text) | |
137 | return web.res |
|
140 | return web.res | |
138 |
|
141 | |||
139 | def _filerevision(web, req, tmpl, fctx): |
|
142 | def _filerevision(web, req, tmpl, fctx): | |
140 | f = fctx.path() |
|
143 | f = fctx.path() | |
141 | text = fctx.data() |
|
144 | text = fctx.data() | |
142 | parity = paritygen(web.stripecount) |
|
145 | parity = paritygen(web.stripecount) | |
143 | ishead = fctx.filerev() in fctx.filelog().headrevs() |
|
146 | ishead = fctx.filerev() in fctx.filelog().headrevs() | |
144 |
|
147 | |||
145 | if util.binary(text): |
|
148 | if util.binary(text): | |
146 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' |
|
149 | mt = mimetypes.guess_type(f)[0] or 'application/octet-stream' | |
147 | text = '(binary:%s)' % mt |
|
150 | text = '(binary:%s)' % mt | |
148 |
|
151 | |||
149 | def lines(): |
|
152 | def lines(): | |
150 | for lineno, t in enumerate(text.splitlines(True)): |
|
153 | for lineno, t in enumerate(text.splitlines(True)): | |
151 | yield {"line": t, |
|
154 | yield {"line": t, | |
152 | "lineid": "l%d" % (lineno + 1), |
|
155 | "lineid": "l%d" % (lineno + 1), | |
153 | "linenumber": "% 6d" % (lineno + 1), |
|
156 | "linenumber": "% 6d" % (lineno + 1), | |
154 | "parity": next(parity)} |
|
157 | "parity": next(parity)} | |
155 |
|
158 | |||
156 | web.res.setbodygen(tmpl( |
|
159 | web.res.setbodygen(tmpl( | |
157 | 'filerevision', |
|
160 | 'filerevision', | |
158 | file=f, |
|
161 | file=f, | |
159 | path=webutil.up(f), |
|
162 | path=webutil.up(f), | |
160 | text=lines(), |
|
163 | text=lines(), | |
161 | symrev=webutil.symrevorshortnode(req, fctx), |
|
164 | symrev=webutil.symrevorshortnode(req, fctx), | |
162 | rename=webutil.renamelink(fctx), |
|
165 | rename=webutil.renamelink(fctx), | |
163 | permissions=fctx.manifest().flags(f), |
|
166 | permissions=fctx.manifest().flags(f), | |
164 | ishead=int(ishead), |
|
167 | ishead=int(ishead), | |
165 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) |
|
168 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) | |
166 |
|
169 | |||
167 | return web.res |
|
170 | return web.res | |
168 |
|
171 | |||
169 | @webcommand('file') |
|
172 | @webcommand('file') | |
170 | def file(web, req, tmpl): |
|
173 | def file(web, req, tmpl): | |
171 | """ |
|
174 | """ | |
172 | /file/{revision}[/{path}] |
|
175 | /file/{revision}[/{path}] | |
173 | ------------------------- |
|
176 | ------------------------- | |
174 |
|
177 | |||
175 | Show information about a directory or file in the repository. |
|
178 | Show information about a directory or file in the repository. | |
176 |
|
179 | |||
177 | Info about the ``path`` given as a URL parameter will be rendered. |
|
180 | Info about the ``path`` given as a URL parameter will be rendered. | |
178 |
|
181 | |||
179 | If ``path`` is a directory, information about the entries in that |
|
182 | If ``path`` is a directory, information about the entries in that | |
180 | directory will be rendered. This form is equivalent to the ``manifest`` |
|
183 | directory will be rendered. This form is equivalent to the ``manifest`` | |
181 | handler. |
|
184 | handler. | |
182 |
|
185 | |||
183 | If ``path`` is a file, information about that file will be shown via |
|
186 | If ``path`` is a file, information about that file will be shown via | |
184 | the ``filerevision`` template. |
|
187 | the ``filerevision`` template. | |
185 |
|
188 | |||
186 | If ``path`` is not defined, information about the root directory will |
|
189 | If ``path`` is not defined, information about the root directory will | |
187 | be rendered. |
|
190 | be rendered. | |
188 | """ |
|
191 | """ | |
189 | if web.req.qsparams.get('style') == 'raw': |
|
192 | if web.req.qsparams.get('style') == 'raw': | |
190 | return rawfile(web, req, tmpl) |
|
193 | return rawfile(web, req, tmpl) | |
191 |
|
194 | |||
192 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) |
|
195 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) | |
193 | if not path: |
|
196 | if not path: | |
194 | return manifest(web, req, tmpl) |
|
197 | return manifest(web, req, tmpl) | |
195 | try: |
|
198 | try: | |
196 | return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req)) |
|
199 | return _filerevision(web, req, tmpl, webutil.filectx(web.repo, req)) | |
197 | except error.LookupError as inst: |
|
200 | except error.LookupError as inst: | |
198 | try: |
|
201 | try: | |
199 | return manifest(web, req, tmpl) |
|
202 | return manifest(web, req, tmpl) | |
200 | except ErrorResponse: |
|
203 | except ErrorResponse: | |
201 | raise inst |
|
204 | raise inst | |
202 |
|
205 | |||
203 | def _search(web, req, tmpl): |
|
206 | def _search(web, req, tmpl): | |
204 | MODE_REVISION = 'rev' |
|
207 | MODE_REVISION = 'rev' | |
205 | MODE_KEYWORD = 'keyword' |
|
208 | MODE_KEYWORD = 'keyword' | |
206 | MODE_REVSET = 'revset' |
|
209 | MODE_REVSET = 'revset' | |
207 |
|
210 | |||
208 | def revsearch(ctx): |
|
211 | def revsearch(ctx): | |
209 | yield ctx |
|
212 | yield ctx | |
210 |
|
213 | |||
211 | def keywordsearch(query): |
|
214 | def keywordsearch(query): | |
212 | lower = encoding.lower |
|
215 | lower = encoding.lower | |
213 | qw = lower(query).split() |
|
216 | qw = lower(query).split() | |
214 |
|
217 | |||
215 | def revgen(): |
|
218 | def revgen(): | |
216 | cl = web.repo.changelog |
|
219 | cl = web.repo.changelog | |
217 | for i in xrange(len(web.repo) - 1, 0, -100): |
|
220 | for i in xrange(len(web.repo) - 1, 0, -100): | |
218 | l = [] |
|
221 | l = [] | |
219 | for j in cl.revs(max(0, i - 99), i): |
|
222 | for j in cl.revs(max(0, i - 99), i): | |
220 | ctx = web.repo[j] |
|
223 | ctx = web.repo[j] | |
221 | l.append(ctx) |
|
224 | l.append(ctx) | |
222 | l.reverse() |
|
225 | l.reverse() | |
223 | for e in l: |
|
226 | for e in l: | |
224 | yield e |
|
227 | yield e | |
225 |
|
228 | |||
226 | for ctx in revgen(): |
|
229 | for ctx in revgen(): | |
227 | miss = 0 |
|
230 | miss = 0 | |
228 | for q in qw: |
|
231 | for q in qw: | |
229 | if not (q in lower(ctx.user()) or |
|
232 | if not (q in lower(ctx.user()) or | |
230 | q in lower(ctx.description()) or |
|
233 | q in lower(ctx.description()) or | |
231 | q in lower(" ".join(ctx.files()))): |
|
234 | q in lower(" ".join(ctx.files()))): | |
232 | miss = 1 |
|
235 | miss = 1 | |
233 | break |
|
236 | break | |
234 | if miss: |
|
237 | if miss: | |
235 | continue |
|
238 | continue | |
236 |
|
239 | |||
237 | yield ctx |
|
240 | yield ctx | |
238 |
|
241 | |||
239 | def revsetsearch(revs): |
|
242 | def revsetsearch(revs): | |
240 | for r in revs: |
|
243 | for r in revs: | |
241 | yield web.repo[r] |
|
244 | yield web.repo[r] | |
242 |
|
245 | |||
243 | searchfuncs = { |
|
246 | searchfuncs = { | |
244 | MODE_REVISION: (revsearch, 'exact revision search'), |
|
247 | MODE_REVISION: (revsearch, 'exact revision search'), | |
245 | MODE_KEYWORD: (keywordsearch, 'literal keyword search'), |
|
248 | MODE_KEYWORD: (keywordsearch, 'literal keyword search'), | |
246 | MODE_REVSET: (revsetsearch, 'revset expression search'), |
|
249 | MODE_REVSET: (revsetsearch, 'revset expression search'), | |
247 | } |
|
250 | } | |
248 |
|
251 | |||
249 | def getsearchmode(query): |
|
252 | def getsearchmode(query): | |
250 | try: |
|
253 | try: | |
251 | ctx = web.repo[query] |
|
254 | ctx = web.repo[query] | |
252 | except (error.RepoError, error.LookupError): |
|
255 | except (error.RepoError, error.LookupError): | |
253 | # query is not an exact revision pointer, need to |
|
256 | # query is not an exact revision pointer, need to | |
254 | # decide if it's a revset expression or keywords |
|
257 | # decide if it's a revset expression or keywords | |
255 | pass |
|
258 | pass | |
256 | else: |
|
259 | else: | |
257 | return MODE_REVISION, ctx |
|
260 | return MODE_REVISION, ctx | |
258 |
|
261 | |||
259 | revdef = 'reverse(%s)' % query |
|
262 | revdef = 'reverse(%s)' % query | |
260 | try: |
|
263 | try: | |
261 | tree = revsetlang.parse(revdef) |
|
264 | tree = revsetlang.parse(revdef) | |
262 | except error.ParseError: |
|
265 | except error.ParseError: | |
263 | # can't parse to a revset tree |
|
266 | # can't parse to a revset tree | |
264 | return MODE_KEYWORD, query |
|
267 | return MODE_KEYWORD, query | |
265 |
|
268 | |||
266 | if revsetlang.depth(tree) <= 2: |
|
269 | if revsetlang.depth(tree) <= 2: | |
267 | # no revset syntax used |
|
270 | # no revset syntax used | |
268 | return MODE_KEYWORD, query |
|
271 | return MODE_KEYWORD, query | |
269 |
|
272 | |||
270 | if any((token, (value or '')[:3]) == ('string', 're:') |
|
273 | if any((token, (value or '')[:3]) == ('string', 're:') | |
271 | for token, value, pos in revsetlang.tokenize(revdef)): |
|
274 | for token, value, pos in revsetlang.tokenize(revdef)): | |
272 | return MODE_KEYWORD, query |
|
275 | return MODE_KEYWORD, query | |
273 |
|
276 | |||
274 | funcsused = revsetlang.funcsused(tree) |
|
277 | funcsused = revsetlang.funcsused(tree) | |
275 | if not funcsused.issubset(revset.safesymbols): |
|
278 | if not funcsused.issubset(revset.safesymbols): | |
276 | return MODE_KEYWORD, query |
|
279 | return MODE_KEYWORD, query | |
277 |
|
280 | |||
278 | mfunc = revset.match(web.repo.ui, revdef, repo=web.repo) |
|
281 | mfunc = revset.match(web.repo.ui, revdef, repo=web.repo) | |
279 | try: |
|
282 | try: | |
280 | revs = mfunc(web.repo) |
|
283 | revs = mfunc(web.repo) | |
281 | return MODE_REVSET, revs |
|
284 | return MODE_REVSET, revs | |
282 | # ParseError: wrongly placed tokens, wrongs arguments, etc |
|
285 | # ParseError: wrongly placed tokens, wrongs arguments, etc | |
283 | # RepoLookupError: no such revision, e.g. in 'revision:' |
|
286 | # RepoLookupError: no such revision, e.g. in 'revision:' | |
284 | # Abort: bookmark/tag not exists |
|
287 | # Abort: bookmark/tag not exists | |
285 | # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo |
|
288 | # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo | |
286 | except (error.ParseError, error.RepoLookupError, error.Abort, |
|
289 | except (error.ParseError, error.RepoLookupError, error.Abort, | |
287 | LookupError): |
|
290 | LookupError): | |
288 | return MODE_KEYWORD, query |
|
291 | return MODE_KEYWORD, query | |
289 |
|
292 | |||
290 | def changelist(**map): |
|
293 | def changelist(**map): | |
291 | count = 0 |
|
294 | count = 0 | |
292 |
|
295 | |||
293 | for ctx in searchfunc[0](funcarg): |
|
296 | for ctx in searchfunc[0](funcarg): | |
294 | count += 1 |
|
297 | count += 1 | |
295 | n = ctx.node() |
|
298 | n = ctx.node() | |
296 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) |
|
299 | showtags = webutil.showtag(web.repo, tmpl, 'changelogtag', n) | |
297 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) |
|
300 | files = webutil.listfilediffs(tmpl, ctx.files(), n, web.maxfiles) | |
298 |
|
301 | |||
299 | yield tmpl('searchentry', |
|
302 | yield tmpl('searchentry', | |
300 | parity=next(parity), |
|
303 | parity=next(parity), | |
301 | changelogtag=showtags, |
|
304 | changelogtag=showtags, | |
302 | files=files, |
|
305 | files=files, | |
303 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) |
|
306 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx))) | |
304 |
|
307 | |||
305 | if count >= revcount: |
|
308 | if count >= revcount: | |
306 | break |
|
309 | break | |
307 |
|
310 | |||
308 | query = req.req.qsparams['rev'] |
|
311 | query = req.req.qsparams['rev'] | |
309 | revcount = web.maxchanges |
|
312 | revcount = web.maxchanges | |
310 | if 'revcount' in req.req.qsparams: |
|
313 | if 'revcount' in req.req.qsparams: | |
311 | try: |
|
314 | try: | |
312 | revcount = int(req.req.qsparams.get('revcount', revcount)) |
|
315 | revcount = int(req.req.qsparams.get('revcount', revcount)) | |
313 | revcount = max(revcount, 1) |
|
316 | revcount = max(revcount, 1) | |
314 | tmpl.defaults['sessionvars']['revcount'] = revcount |
|
317 | tmpl.defaults['sessionvars']['revcount'] = revcount | |
315 | except ValueError: |
|
318 | except ValueError: | |
316 | pass |
|
319 | pass | |
317 |
|
320 | |||
318 | lessvars = copy.copy(tmpl.defaults['sessionvars']) |
|
321 | lessvars = copy.copy(tmpl.defaults['sessionvars']) | |
319 | lessvars['revcount'] = max(revcount // 2, 1) |
|
322 | lessvars['revcount'] = max(revcount // 2, 1) | |
320 | lessvars['rev'] = query |
|
323 | lessvars['rev'] = query | |
321 | morevars = copy.copy(tmpl.defaults['sessionvars']) |
|
324 | morevars = copy.copy(tmpl.defaults['sessionvars']) | |
322 | morevars['revcount'] = revcount * 2 |
|
325 | morevars['revcount'] = revcount * 2 | |
323 | morevars['rev'] = query |
|
326 | morevars['rev'] = query | |
324 |
|
327 | |||
325 | mode, funcarg = getsearchmode(query) |
|
328 | mode, funcarg = getsearchmode(query) | |
326 |
|
329 | |||
327 | if 'forcekw' in req.req.qsparams: |
|
330 | if 'forcekw' in req.req.qsparams: | |
328 | showforcekw = '' |
|
331 | showforcekw = '' | |
329 | showunforcekw = searchfuncs[mode][1] |
|
332 | showunforcekw = searchfuncs[mode][1] | |
330 | mode = MODE_KEYWORD |
|
333 | mode = MODE_KEYWORD | |
331 | funcarg = query |
|
334 | funcarg = query | |
332 | else: |
|
335 | else: | |
333 | if mode != MODE_KEYWORD: |
|
336 | if mode != MODE_KEYWORD: | |
334 | showforcekw = searchfuncs[MODE_KEYWORD][1] |
|
337 | showforcekw = searchfuncs[MODE_KEYWORD][1] | |
335 | else: |
|
338 | else: | |
336 | showforcekw = '' |
|
339 | showforcekw = '' | |
337 | showunforcekw = '' |
|
340 | showunforcekw = '' | |
338 |
|
341 | |||
339 | searchfunc = searchfuncs[mode] |
|
342 | searchfunc = searchfuncs[mode] | |
340 |
|
343 | |||
341 | tip = web.repo['tip'] |
|
344 | tip = web.repo['tip'] | |
342 | parity = paritygen(web.stripecount) |
|
345 | parity = paritygen(web.stripecount) | |
343 |
|
346 | |||
344 | web.res.setbodygen(tmpl( |
|
347 | web.res.setbodygen(tmpl( | |
345 | 'search', |
|
348 | 'search', | |
346 | query=query, |
|
349 | query=query, | |
347 | node=tip.hex(), |
|
350 | node=tip.hex(), | |
348 | symrev='tip', |
|
351 | symrev='tip', | |
349 | entries=changelist, |
|
352 | entries=changelist, | |
350 | archives=web.archivelist('tip'), |
|
353 | archives=web.archivelist('tip'), | |
351 | morevars=morevars, |
|
354 | morevars=morevars, | |
352 | lessvars=lessvars, |
|
355 | lessvars=lessvars, | |
353 | modedesc=searchfunc[1], |
|
356 | modedesc=searchfunc[1], | |
354 | showforcekw=showforcekw, |
|
357 | showforcekw=showforcekw, | |
355 | showunforcekw=showunforcekw)) |
|
358 | showunforcekw=showunforcekw)) | |
356 |
|
359 | |||
357 | return web.res |
|
360 | return web.res | |
358 |
|
361 | |||
359 | @webcommand('changelog') |
|
362 | @webcommand('changelog') | |
360 | def changelog(web, req, tmpl, shortlog=False): |
|
363 | def changelog(web, req, tmpl, shortlog=False): | |
361 | """ |
|
364 | """ | |
362 | /changelog[/{revision}] |
|
365 | /changelog[/{revision}] | |
363 | ----------------------- |
|
366 | ----------------------- | |
364 |
|
367 | |||
365 | Show information about multiple changesets. |
|
368 | Show information about multiple changesets. | |
366 |
|
369 | |||
367 | If the optional ``revision`` URL argument is absent, information about |
|
370 | If the optional ``revision`` URL argument is absent, information about | |
368 | all changesets starting at ``tip`` will be rendered. If the ``revision`` |
|
371 | all changesets starting at ``tip`` will be rendered. If the ``revision`` | |
369 | argument is present, changesets will be shown starting from the specified |
|
372 | argument is present, changesets will be shown starting from the specified | |
370 | revision. |
|
373 | revision. | |
371 |
|
374 | |||
372 | If ``revision`` is absent, the ``rev`` query string argument may be |
|
375 | If ``revision`` is absent, the ``rev`` query string argument may be | |
373 | defined. This will perform a search for changesets. |
|
376 | defined. This will perform a search for changesets. | |
374 |
|
377 | |||
375 | The argument for ``rev`` can be a single revision, a revision set, |
|
378 | The argument for ``rev`` can be a single revision, a revision set, | |
376 | or a literal keyword to search for in changeset data (equivalent to |
|
379 | or a literal keyword to search for in changeset data (equivalent to | |
377 | :hg:`log -k`). |
|
380 | :hg:`log -k`). | |
378 |
|
381 | |||
379 | The ``revcount`` query string argument defines the maximum numbers of |
|
382 | The ``revcount`` query string argument defines the maximum numbers of | |
380 | changesets to render. |
|
383 | changesets to render. | |
381 |
|
384 | |||
382 | For non-searches, the ``changelog`` template will be rendered. |
|
385 | For non-searches, the ``changelog`` template will be rendered. | |
383 | """ |
|
386 | """ | |
384 |
|
387 | |||
385 | query = '' |
|
388 | query = '' | |
386 | if 'node' in req.req.qsparams: |
|
389 | if 'node' in req.req.qsparams: | |
387 | ctx = webutil.changectx(web.repo, req) |
|
390 | ctx = webutil.changectx(web.repo, req) | |
388 | symrev = webutil.symrevorshortnode(req, ctx) |
|
391 | symrev = webutil.symrevorshortnode(req, ctx) | |
389 | elif 'rev' in req.req.qsparams: |
|
392 | elif 'rev' in req.req.qsparams: | |
390 | return _search(web, req, tmpl) |
|
393 | return _search(web, req, tmpl) | |
391 | else: |
|
394 | else: | |
392 | ctx = web.repo['tip'] |
|
395 | ctx = web.repo['tip'] | |
393 | symrev = 'tip' |
|
396 | symrev = 'tip' | |
394 |
|
397 | |||
395 | def changelist(): |
|
398 | def changelist(): | |
396 | revs = [] |
|
399 | revs = [] | |
397 | if pos != -1: |
|
400 | if pos != -1: | |
398 | revs = web.repo.changelog.revs(pos, 0) |
|
401 | revs = web.repo.changelog.revs(pos, 0) | |
399 | curcount = 0 |
|
402 | curcount = 0 | |
400 | for rev in revs: |
|
403 | for rev in revs: | |
401 | curcount += 1 |
|
404 | curcount += 1 | |
402 | if curcount > revcount + 1: |
|
405 | if curcount > revcount + 1: | |
403 | break |
|
406 | break | |
404 |
|
407 | |||
405 | entry = webutil.changelistentry(web, web.repo[rev], tmpl) |
|
408 | entry = webutil.changelistentry(web, web.repo[rev], tmpl) | |
406 | entry['parity'] = next(parity) |
|
409 | entry['parity'] = next(parity) | |
407 | yield entry |
|
410 | yield entry | |
408 |
|
411 | |||
409 | if shortlog: |
|
412 | if shortlog: | |
410 | revcount = web.maxshortchanges |
|
413 | revcount = web.maxshortchanges | |
411 | else: |
|
414 | else: | |
412 | revcount = web.maxchanges |
|
415 | revcount = web.maxchanges | |
413 |
|
416 | |||
414 | if 'revcount' in req.req.qsparams: |
|
417 | if 'revcount' in req.req.qsparams: | |
415 | try: |
|
418 | try: | |
416 | revcount = int(req.req.qsparams.get('revcount', revcount)) |
|
419 | revcount = int(req.req.qsparams.get('revcount', revcount)) | |
417 | revcount = max(revcount, 1) |
|
420 | revcount = max(revcount, 1) | |
418 | tmpl.defaults['sessionvars']['revcount'] = revcount |
|
421 | tmpl.defaults['sessionvars']['revcount'] = revcount | |
419 | except ValueError: |
|
422 | except ValueError: | |
420 | pass |
|
423 | pass | |
421 |
|
424 | |||
422 | lessvars = copy.copy(tmpl.defaults['sessionvars']) |
|
425 | lessvars = copy.copy(tmpl.defaults['sessionvars']) | |
423 | lessvars['revcount'] = max(revcount // 2, 1) |
|
426 | lessvars['revcount'] = max(revcount // 2, 1) | |
424 | morevars = copy.copy(tmpl.defaults['sessionvars']) |
|
427 | morevars = copy.copy(tmpl.defaults['sessionvars']) | |
425 | morevars['revcount'] = revcount * 2 |
|
428 | morevars['revcount'] = revcount * 2 | |
426 |
|
429 | |||
427 | count = len(web.repo) |
|
430 | count = len(web.repo) | |
428 | pos = ctx.rev() |
|
431 | pos = ctx.rev() | |
429 | parity = paritygen(web.stripecount) |
|
432 | parity = paritygen(web.stripecount) | |
430 |
|
433 | |||
431 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) |
|
434 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) | |
432 |
|
435 | |||
433 | entries = list(changelist()) |
|
436 | entries = list(changelist()) | |
434 | latestentry = entries[:1] |
|
437 | latestentry = entries[:1] | |
435 | if len(entries) > revcount: |
|
438 | if len(entries) > revcount: | |
436 | nextentry = entries[-1:] |
|
439 | nextentry = entries[-1:] | |
437 | entries = entries[:-1] |
|
440 | entries = entries[:-1] | |
438 | else: |
|
441 | else: | |
439 | nextentry = [] |
|
442 | nextentry = [] | |
440 |
|
443 | |||
441 | web.res.setbodygen(tmpl( |
|
444 | web.res.setbodygen(tmpl( | |
442 | 'shortlog' if shortlog else 'changelog', |
|
445 | 'shortlog' if shortlog else 'changelog', | |
443 | changenav=changenav, |
|
446 | changenav=changenav, | |
444 | node=ctx.hex(), |
|
447 | node=ctx.hex(), | |
445 | rev=pos, |
|
448 | rev=pos, | |
446 | symrev=symrev, |
|
449 | symrev=symrev, | |
447 | changesets=count, |
|
450 | changesets=count, | |
448 | entries=entries, |
|
451 | entries=entries, | |
449 | latestentry=latestentry, |
|
452 | latestentry=latestentry, | |
450 | nextentry=nextentry, |
|
453 | nextentry=nextentry, | |
451 | archives=web.archivelist('tip'), |
|
454 | archives=web.archivelist('tip'), | |
452 | revcount=revcount, |
|
455 | revcount=revcount, | |
453 | morevars=morevars, |
|
456 | morevars=morevars, | |
454 | lessvars=lessvars, |
|
457 | lessvars=lessvars, | |
455 | query=query)) |
|
458 | query=query)) | |
456 |
|
459 | |||
457 | return web.res |
|
460 | return web.res | |
458 |
|
461 | |||
459 | @webcommand('shortlog') |
|
462 | @webcommand('shortlog') | |
460 | def shortlog(web, req, tmpl): |
|
463 | def shortlog(web, req, tmpl): | |
461 | """ |
|
464 | """ | |
462 | /shortlog |
|
465 | /shortlog | |
463 | --------- |
|
466 | --------- | |
464 |
|
467 | |||
465 | Show basic information about a set of changesets. |
|
468 | Show basic information about a set of changesets. | |
466 |
|
469 | |||
467 | This accepts the same parameters as the ``changelog`` handler. The only |
|
470 | This accepts the same parameters as the ``changelog`` handler. The only | |
468 | difference is the ``shortlog`` template will be rendered instead of the |
|
471 | difference is the ``shortlog`` template will be rendered instead of the | |
469 | ``changelog`` template. |
|
472 | ``changelog`` template. | |
470 | """ |
|
473 | """ | |
471 | return changelog(web, req, tmpl, shortlog=True) |
|
474 | return changelog(web, req, tmpl, shortlog=True) | |
472 |
|
475 | |||
473 | @webcommand('changeset') |
|
476 | @webcommand('changeset') | |
474 | def changeset(web, req, tmpl): |
|
477 | def changeset(web, req, tmpl): | |
475 | """ |
|
478 | """ | |
476 | /changeset[/{revision}] |
|
479 | /changeset[/{revision}] | |
477 | ----------------------- |
|
480 | ----------------------- | |
478 |
|
481 | |||
479 | Show information about a single changeset. |
|
482 | Show information about a single changeset. | |
480 |
|
483 | |||
481 | A URL path argument is the changeset identifier to show. See ``hg help |
|
484 | A URL path argument is the changeset identifier to show. See ``hg help | |
482 | revisions`` for possible values. If not defined, the ``tip`` changeset |
|
485 | revisions`` for possible values. If not defined, the ``tip`` changeset | |
483 | will be shown. |
|
486 | will be shown. | |
484 |
|
487 | |||
485 | The ``changeset`` template is rendered. Contents of the ``changesettag``, |
|
488 | The ``changeset`` template is rendered. Contents of the ``changesettag``, | |
486 | ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many |
|
489 | ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many | |
487 | templates related to diffs may all be used to produce the output. |
|
490 | templates related to diffs may all be used to produce the output. | |
488 | """ |
|
491 | """ | |
489 | ctx = webutil.changectx(web.repo, req) |
|
492 | ctx = webutil.changectx(web.repo, req) | |
490 | web.res.setbodygen(tmpl('changeset', |
|
493 | web.res.setbodygen(tmpl('changeset', | |
491 | **webutil.changesetentry(web, req, tmpl, ctx))) |
|
494 | **webutil.changesetentry(web, req, tmpl, ctx))) | |
492 | return web.res |
|
495 | return web.res | |
493 |
|
496 | |||
494 | rev = webcommand('rev')(changeset) |
|
497 | rev = webcommand('rev')(changeset) | |
495 |
|
498 | |||
496 | def decodepath(path): |
|
499 | def decodepath(path): | |
497 | """Hook for mapping a path in the repository to a path in the |
|
500 | """Hook for mapping a path in the repository to a path in the | |
498 | working copy. |
|
501 | working copy. | |
499 |
|
502 | |||
500 | Extensions (e.g., largefiles) can override this to remap files in |
|
503 | Extensions (e.g., largefiles) can override this to remap files in | |
501 | the virtual file system presented by the manifest command below.""" |
|
504 | the virtual file system presented by the manifest command below.""" | |
502 | return path |
|
505 | return path | |
503 |
|
506 | |||
504 | @webcommand('manifest') |
|
507 | @webcommand('manifest') | |
505 | def manifest(web, req, tmpl): |
|
508 | def manifest(web, req, tmpl): | |
506 | """ |
|
509 | """ | |
507 | /manifest[/{revision}[/{path}]] |
|
510 | /manifest[/{revision}[/{path}]] | |
508 | ------------------------------- |
|
511 | ------------------------------- | |
509 |
|
512 | |||
510 | Show information about a directory. |
|
513 | Show information about a directory. | |
511 |
|
514 | |||
512 | If the URL path arguments are omitted, information about the root |
|
515 | If the URL path arguments are omitted, information about the root | |
513 | directory for the ``tip`` changeset will be shown. |
|
516 | directory for the ``tip`` changeset will be shown. | |
514 |
|
517 | |||
515 | Because this handler can only show information for directories, it |
|
518 | Because this handler can only show information for directories, it | |
516 | is recommended to use the ``file`` handler instead, as it can handle both |
|
519 | is recommended to use the ``file`` handler instead, as it can handle both | |
517 | directories and files. |
|
520 | directories and files. | |
518 |
|
521 | |||
519 | The ``manifest`` template will be rendered for this handler. |
|
522 | The ``manifest`` template will be rendered for this handler. | |
520 | """ |
|
523 | """ | |
521 | if 'node' in req.req.qsparams: |
|
524 | if 'node' in req.req.qsparams: | |
522 | ctx = webutil.changectx(web.repo, req) |
|
525 | ctx = webutil.changectx(web.repo, req) | |
523 | symrev = webutil.symrevorshortnode(req, ctx) |
|
526 | symrev = webutil.symrevorshortnode(req, ctx) | |
524 | else: |
|
527 | else: | |
525 | ctx = web.repo['tip'] |
|
528 | ctx = web.repo['tip'] | |
526 | symrev = 'tip' |
|
529 | symrev = 'tip' | |
527 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) |
|
530 | path = webutil.cleanpath(web.repo, req.req.qsparams.get('file', '')) | |
528 | mf = ctx.manifest() |
|
531 | mf = ctx.manifest() | |
529 | node = ctx.node() |
|
532 | node = ctx.node() | |
530 |
|
533 | |||
531 | files = {} |
|
534 | files = {} | |
532 | dirs = {} |
|
535 | dirs = {} | |
533 | parity = paritygen(web.stripecount) |
|
536 | parity = paritygen(web.stripecount) | |
534 |
|
537 | |||
535 | if path and path[-1:] != "/": |
|
538 | if path and path[-1:] != "/": | |
536 | path += "/" |
|
539 | path += "/" | |
537 | l = len(path) |
|
540 | l = len(path) | |
538 | abspath = "/" + path |
|
541 | abspath = "/" + path | |
539 |
|
542 | |||
540 | for full, n in mf.iteritems(): |
|
543 | for full, n in mf.iteritems(): | |
541 | # the virtual path (working copy path) used for the full |
|
544 | # the virtual path (working copy path) used for the full | |
542 | # (repository) path |
|
545 | # (repository) path | |
543 | f = decodepath(full) |
|
546 | f = decodepath(full) | |
544 |
|
547 | |||
545 | if f[:l] != path: |
|
548 | if f[:l] != path: | |
546 | continue |
|
549 | continue | |
547 | remain = f[l:] |
|
550 | remain = f[l:] | |
548 | elements = remain.split('/') |
|
551 | elements = remain.split('/') | |
549 | if len(elements) == 1: |
|
552 | if len(elements) == 1: | |
550 | files[remain] = full |
|
553 | files[remain] = full | |
551 | else: |
|
554 | else: | |
552 | h = dirs # need to retain ref to dirs (root) |
|
555 | h = dirs # need to retain ref to dirs (root) | |
553 | for elem in elements[0:-1]: |
|
556 | for elem in elements[0:-1]: | |
554 | if elem not in h: |
|
557 | if elem not in h: | |
555 | h[elem] = {} |
|
558 | h[elem] = {} | |
556 | h = h[elem] |
|
559 | h = h[elem] | |
557 | if len(h) > 1: |
|
560 | if len(h) > 1: | |
558 | break |
|
561 | break | |
559 | h[None] = None # denotes files present |
|
562 | h[None] = None # denotes files present | |
560 |
|
563 | |||
561 | if mf and not files and not dirs: |
|
564 | if mf and not files and not dirs: | |
562 | raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) |
|
565 | raise ErrorResponse(HTTP_NOT_FOUND, 'path not found: ' + path) | |
563 |
|
566 | |||
564 | def filelist(**map): |
|
567 | def filelist(**map): | |
565 | for f in sorted(files): |
|
568 | for f in sorted(files): | |
566 | full = files[f] |
|
569 | full = files[f] | |
567 |
|
570 | |||
568 | fctx = ctx.filectx(full) |
|
571 | fctx = ctx.filectx(full) | |
569 | yield {"file": full, |
|
572 | yield {"file": full, | |
570 | "parity": next(parity), |
|
573 | "parity": next(parity), | |
571 | "basename": f, |
|
574 | "basename": f, | |
572 | "date": fctx.date(), |
|
575 | "date": fctx.date(), | |
573 | "size": fctx.size(), |
|
576 | "size": fctx.size(), | |
574 | "permissions": mf.flags(full)} |
|
577 | "permissions": mf.flags(full)} | |
575 |
|
578 | |||
576 | def dirlist(**map): |
|
579 | def dirlist(**map): | |
577 | for d in sorted(dirs): |
|
580 | for d in sorted(dirs): | |
578 |
|
581 | |||
579 | emptydirs = [] |
|
582 | emptydirs = [] | |
580 | h = dirs[d] |
|
583 | h = dirs[d] | |
581 | while isinstance(h, dict) and len(h) == 1: |
|
584 | while isinstance(h, dict) and len(h) == 1: | |
582 | k, v = next(iter(h.items())) |
|
585 | k, v = next(iter(h.items())) | |
583 | if v: |
|
586 | if v: | |
584 | emptydirs.append(k) |
|
587 | emptydirs.append(k) | |
585 | h = v |
|
588 | h = v | |
586 |
|
589 | |||
587 | path = "%s%s" % (abspath, d) |
|
590 | path = "%s%s" % (abspath, d) | |
588 | yield {"parity": next(parity), |
|
591 | yield {"parity": next(parity), | |
589 | "path": path, |
|
592 | "path": path, | |
590 | "emptydirs": "/".join(emptydirs), |
|
593 | "emptydirs": "/".join(emptydirs), | |
591 | "basename": d} |
|
594 | "basename": d} | |
592 |
|
595 | |||
593 | web.res.setbodygen(tmpl( |
|
596 | web.res.setbodygen(tmpl( | |
594 | 'manifest', |
|
597 | 'manifest', | |
595 | symrev=symrev, |
|
598 | symrev=symrev, | |
596 | path=abspath, |
|
599 | path=abspath, | |
597 | up=webutil.up(abspath), |
|
600 | up=webutil.up(abspath), | |
598 | upparity=next(parity), |
|
601 | upparity=next(parity), | |
599 | fentries=filelist, |
|
602 | fentries=filelist, | |
600 | dentries=dirlist, |
|
603 | dentries=dirlist, | |
601 | archives=web.archivelist(hex(node)), |
|
604 | archives=web.archivelist(hex(node)), | |
602 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) |
|
605 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) | |
603 |
|
606 | |||
604 | return web.res |
|
607 | return web.res | |
605 |
|
608 | |||
606 | @webcommand('tags') |
|
609 | @webcommand('tags') | |
607 | def tags(web, req, tmpl): |
|
610 | def tags(web, req, tmpl): | |
608 | """ |
|
611 | """ | |
609 | /tags |
|
612 | /tags | |
610 | ----- |
|
613 | ----- | |
611 |
|
614 | |||
612 | Show information about tags. |
|
615 | Show information about tags. | |
613 |
|
616 | |||
614 | No arguments are accepted. |
|
617 | No arguments are accepted. | |
615 |
|
618 | |||
616 | The ``tags`` template is rendered. |
|
619 | The ``tags`` template is rendered. | |
617 | """ |
|
620 | """ | |
618 | i = list(reversed(web.repo.tagslist())) |
|
621 | i = list(reversed(web.repo.tagslist())) | |
619 | parity = paritygen(web.stripecount) |
|
622 | parity = paritygen(web.stripecount) | |
620 |
|
623 | |||
621 | def entries(notip, latestonly, **map): |
|
624 | def entries(notip, latestonly, **map): | |
622 | t = i |
|
625 | t = i | |
623 | if notip: |
|
626 | if notip: | |
624 | t = [(k, n) for k, n in i if k != "tip"] |
|
627 | t = [(k, n) for k, n in i if k != "tip"] | |
625 | if latestonly: |
|
628 | if latestonly: | |
626 | t = t[:1] |
|
629 | t = t[:1] | |
627 | for k, n in t: |
|
630 | for k, n in t: | |
628 | yield {"parity": next(parity), |
|
631 | yield {"parity": next(parity), | |
629 | "tag": k, |
|
632 | "tag": k, | |
630 | "date": web.repo[n].date(), |
|
633 | "date": web.repo[n].date(), | |
631 | "node": hex(n)} |
|
634 | "node": hex(n)} | |
632 |
|
635 | |||
633 | web.res.setbodygen(tmpl( |
|
636 | web.res.setbodygen(tmpl( | |
634 | 'tags', |
|
637 | 'tags', | |
635 | node=hex(web.repo.changelog.tip()), |
|
638 | node=hex(web.repo.changelog.tip()), | |
636 | entries=lambda **x: entries(False, False, **x), |
|
639 | entries=lambda **x: entries(False, False, **x), | |
637 | entriesnotip=lambda **x: entries(True, False, **x), |
|
640 | entriesnotip=lambda **x: entries(True, False, **x), | |
638 | latestentry=lambda **x: entries(True, True, **x))) |
|
641 | latestentry=lambda **x: entries(True, True, **x))) | |
639 |
|
642 | |||
640 | return web.res |
|
643 | return web.res | |
641 |
|
644 | |||
642 | @webcommand('bookmarks') |
|
645 | @webcommand('bookmarks') | |
643 | def bookmarks(web, req, tmpl): |
|
646 | def bookmarks(web, req, tmpl): | |
644 | """ |
|
647 | """ | |
645 | /bookmarks |
|
648 | /bookmarks | |
646 | ---------- |
|
649 | ---------- | |
647 |
|
650 | |||
648 | Show information about bookmarks. |
|
651 | Show information about bookmarks. | |
649 |
|
652 | |||
650 | No arguments are accepted. |
|
653 | No arguments are accepted. | |
651 |
|
654 | |||
652 | The ``bookmarks`` template is rendered. |
|
655 | The ``bookmarks`` template is rendered. | |
653 | """ |
|
656 | """ | |
654 | i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] |
|
657 | i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] | |
655 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) |
|
658 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) | |
656 | i = sorted(i, key=sortkey, reverse=True) |
|
659 | i = sorted(i, key=sortkey, reverse=True) | |
657 | parity = paritygen(web.stripecount) |
|
660 | parity = paritygen(web.stripecount) | |
658 |
|
661 | |||
659 | def entries(latestonly, **map): |
|
662 | def entries(latestonly, **map): | |
660 | t = i |
|
663 | t = i | |
661 | if latestonly: |
|
664 | if latestonly: | |
662 | t = i[:1] |
|
665 | t = i[:1] | |
663 | for k, n in t: |
|
666 | for k, n in t: | |
664 | yield {"parity": next(parity), |
|
667 | yield {"parity": next(parity), | |
665 | "bookmark": k, |
|
668 | "bookmark": k, | |
666 | "date": web.repo[n].date(), |
|
669 | "date": web.repo[n].date(), | |
667 | "node": hex(n)} |
|
670 | "node": hex(n)} | |
668 |
|
671 | |||
669 | if i: |
|
672 | if i: | |
670 | latestrev = i[0][1] |
|
673 | latestrev = i[0][1] | |
671 | else: |
|
674 | else: | |
672 | latestrev = -1 |
|
675 | latestrev = -1 | |
673 |
|
676 | |||
674 | web.res.setbodygen(tmpl( |
|
677 | web.res.setbodygen(tmpl( | |
675 | 'bookmarks', |
|
678 | 'bookmarks', | |
676 | node=hex(web.repo.changelog.tip()), |
|
679 | node=hex(web.repo.changelog.tip()), | |
677 | lastchange=[{'date': web.repo[latestrev].date()}], |
|
680 | lastchange=[{'date': web.repo[latestrev].date()}], | |
678 | entries=lambda **x: entries(latestonly=False, **x), |
|
681 | entries=lambda **x: entries(latestonly=False, **x), | |
679 | latestentry=lambda **x: entries(latestonly=True, **x))) |
|
682 | latestentry=lambda **x: entries(latestonly=True, **x))) | |
680 |
|
683 | |||
681 | return web.res |
|
684 | return web.res | |
682 |
|
685 | |||
683 | @webcommand('branches') |
|
686 | @webcommand('branches') | |
684 | def branches(web, req, tmpl): |
|
687 | def branches(web, req, tmpl): | |
685 | """ |
|
688 | """ | |
686 | /branches |
|
689 | /branches | |
687 | --------- |
|
690 | --------- | |
688 |
|
691 | |||
689 | Show information about branches. |
|
692 | Show information about branches. | |
690 |
|
693 | |||
691 | All known branches are contained in the output, even closed branches. |
|
694 | All known branches are contained in the output, even closed branches. | |
692 |
|
695 | |||
693 | No arguments are accepted. |
|
696 | No arguments are accepted. | |
694 |
|
697 | |||
695 | The ``branches`` template is rendered. |
|
698 | The ``branches`` template is rendered. | |
696 | """ |
|
699 | """ | |
697 | entries = webutil.branchentries(web.repo, web.stripecount) |
|
700 | entries = webutil.branchentries(web.repo, web.stripecount) | |
698 | latestentry = webutil.branchentries(web.repo, web.stripecount, 1) |
|
701 | latestentry = webutil.branchentries(web.repo, web.stripecount, 1) | |
699 |
|
702 | |||
700 | web.res.setbodygen(tmpl( |
|
703 | web.res.setbodygen(tmpl( | |
701 | 'branches', |
|
704 | 'branches', | |
702 | node=hex(web.repo.changelog.tip()), |
|
705 | node=hex(web.repo.changelog.tip()), | |
703 | entries=entries, |
|
706 | entries=entries, | |
704 | latestentry=latestentry)) |
|
707 | latestentry=latestentry)) | |
705 |
|
708 | |||
706 | return web.res |
|
709 | return web.res | |
707 |
|
710 | |||
708 | @webcommand('summary') |
|
711 | @webcommand('summary') | |
709 | def summary(web, req, tmpl): |
|
712 | def summary(web, req, tmpl): | |
710 | """ |
|
713 | """ | |
711 | /summary |
|
714 | /summary | |
712 | -------- |
|
715 | -------- | |
713 |
|
716 | |||
714 | Show a summary of repository state. |
|
717 | Show a summary of repository state. | |
715 |
|
718 | |||
716 | Information about the latest changesets, bookmarks, tags, and branches |
|
719 | Information about the latest changesets, bookmarks, tags, and branches | |
717 | is captured by this handler. |
|
720 | is captured by this handler. | |
718 |
|
721 | |||
719 | The ``summary`` template is rendered. |
|
722 | The ``summary`` template is rendered. | |
720 | """ |
|
723 | """ | |
721 | i = reversed(web.repo.tagslist()) |
|
724 | i = reversed(web.repo.tagslist()) | |
722 |
|
725 | |||
723 | def tagentries(**map): |
|
726 | def tagentries(**map): | |
724 | parity = paritygen(web.stripecount) |
|
727 | parity = paritygen(web.stripecount) | |
725 | count = 0 |
|
728 | count = 0 | |
726 | for k, n in i: |
|
729 | for k, n in i: | |
727 | if k == "tip": # skip tip |
|
730 | if k == "tip": # skip tip | |
728 | continue |
|
731 | continue | |
729 |
|
732 | |||
730 | count += 1 |
|
733 | count += 1 | |
731 | if count > 10: # limit to 10 tags |
|
734 | if count > 10: # limit to 10 tags | |
732 | break |
|
735 | break | |
733 |
|
736 | |||
734 | yield tmpl("tagentry", |
|
737 | yield tmpl("tagentry", | |
735 | parity=next(parity), |
|
738 | parity=next(parity), | |
736 | tag=k, |
|
739 | tag=k, | |
737 | node=hex(n), |
|
740 | node=hex(n), | |
738 | date=web.repo[n].date()) |
|
741 | date=web.repo[n].date()) | |
739 |
|
742 | |||
740 | def bookmarks(**map): |
|
743 | def bookmarks(**map): | |
741 | parity = paritygen(web.stripecount) |
|
744 | parity = paritygen(web.stripecount) | |
742 | marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] |
|
745 | marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] | |
743 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) |
|
746 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) | |
744 | marks = sorted(marks, key=sortkey, reverse=True) |
|
747 | marks = sorted(marks, key=sortkey, reverse=True) | |
745 | for k, n in marks[:10]: # limit to 10 bookmarks |
|
748 | for k, n in marks[:10]: # limit to 10 bookmarks | |
746 | yield {'parity': next(parity), |
|
749 | yield {'parity': next(parity), | |
747 | 'bookmark': k, |
|
750 | 'bookmark': k, | |
748 | 'date': web.repo[n].date(), |
|
751 | 'date': web.repo[n].date(), | |
749 | 'node': hex(n)} |
|
752 | 'node': hex(n)} | |
750 |
|
753 | |||
751 | def changelist(**map): |
|
754 | def changelist(**map): | |
752 | parity = paritygen(web.stripecount, offset=start - end) |
|
755 | parity = paritygen(web.stripecount, offset=start - end) | |
753 | l = [] # build a list in forward order for efficiency |
|
756 | l = [] # build a list in forward order for efficiency | |
754 | revs = [] |
|
757 | revs = [] | |
755 | if start < end: |
|
758 | if start < end: | |
756 | revs = web.repo.changelog.revs(start, end - 1) |
|
759 | revs = web.repo.changelog.revs(start, end - 1) | |
757 | for i in revs: |
|
760 | for i in revs: | |
758 | ctx = web.repo[i] |
|
761 | ctx = web.repo[i] | |
759 |
|
762 | |||
760 | l.append(tmpl( |
|
763 | l.append(tmpl( | |
761 | 'shortlogentry', |
|
764 | 'shortlogentry', | |
762 | parity=next(parity), |
|
765 | parity=next(parity), | |
763 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) |
|
766 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) | |
764 |
|
767 | |||
765 | for entry in reversed(l): |
|
768 | for entry in reversed(l): | |
766 | yield entry |
|
769 | yield entry | |
767 |
|
770 | |||
768 | tip = web.repo['tip'] |
|
771 | tip = web.repo['tip'] | |
769 | count = len(web.repo) |
|
772 | count = len(web.repo) | |
770 | start = max(0, count - web.maxchanges) |
|
773 | start = max(0, count - web.maxchanges) | |
771 | end = min(count, start + web.maxchanges) |
|
774 | end = min(count, start + web.maxchanges) | |
772 |
|
775 | |||
773 | desc = web.config("web", "description") |
|
776 | desc = web.config("web", "description") | |
774 | if not desc: |
|
777 | if not desc: | |
775 | desc = 'unknown' |
|
778 | desc = 'unknown' | |
776 |
|
779 | |||
777 | web.res.setbodygen(tmpl( |
|
780 | web.res.setbodygen(tmpl( | |
778 | 'summary', |
|
781 | 'summary', | |
779 | desc=desc, |
|
782 | desc=desc, | |
780 | owner=get_contact(web.config) or 'unknown', |
|
783 | owner=get_contact(web.config) or 'unknown', | |
781 | lastchange=tip.date(), |
|
784 | lastchange=tip.date(), | |
782 | tags=tagentries, |
|
785 | tags=tagentries, | |
783 | bookmarks=bookmarks, |
|
786 | bookmarks=bookmarks, | |
784 | branches=webutil.branchentries(web.repo, web.stripecount, 10), |
|
787 | branches=webutil.branchentries(web.repo, web.stripecount, 10), | |
785 | shortlog=changelist, |
|
788 | shortlog=changelist, | |
786 | node=tip.hex(), |
|
789 | node=tip.hex(), | |
787 | symrev='tip', |
|
790 | symrev='tip', | |
788 | archives=web.archivelist('tip'), |
|
791 | archives=web.archivelist('tip'), | |
789 | labels=web.configlist('web', 'labels'))) |
|
792 | labels=web.configlist('web', 'labels'))) | |
790 |
|
793 | |||
791 | return web.res |
|
794 | return web.res | |
792 |
|
795 | |||
793 | @webcommand('filediff') |
|
796 | @webcommand('filediff') | |
794 | def filediff(web, req, tmpl): |
|
797 | def filediff(web, req, tmpl): | |
795 | """ |
|
798 | """ | |
796 | /diff/{revision}/{path} |
|
799 | /diff/{revision}/{path} | |
797 | ----------------------- |
|
800 | ----------------------- | |
798 |
|
801 | |||
799 | Show how a file changed in a particular commit. |
|
802 | Show how a file changed in a particular commit. | |
800 |
|
803 | |||
801 | The ``filediff`` template is rendered. |
|
804 | The ``filediff`` template is rendered. | |
802 |
|
805 | |||
803 | This handler is registered under both the ``/diff`` and ``/filediff`` |
|
806 | This handler is registered under both the ``/diff`` and ``/filediff`` | |
804 | paths. ``/diff`` is used in modern code. |
|
807 | paths. ``/diff`` is used in modern code. | |
805 | """ |
|
808 | """ | |
806 | fctx, ctx = None, None |
|
809 | fctx, ctx = None, None | |
807 | try: |
|
810 | try: | |
808 | fctx = webutil.filectx(web.repo, req) |
|
811 | fctx = webutil.filectx(web.repo, req) | |
809 | except LookupError: |
|
812 | except LookupError: | |
810 | ctx = webutil.changectx(web.repo, req) |
|
813 | ctx = webutil.changectx(web.repo, req) | |
811 | path = webutil.cleanpath(web.repo, req.req.qsparams['file']) |
|
814 | path = webutil.cleanpath(web.repo, req.req.qsparams['file']) | |
812 | if path not in ctx.files(): |
|
815 | if path not in ctx.files(): | |
813 | raise |
|
816 | raise | |
814 |
|
817 | |||
815 | if fctx is not None: |
|
818 | if fctx is not None: | |
816 | path = fctx.path() |
|
819 | path = fctx.path() | |
817 | ctx = fctx.changectx() |
|
820 | ctx = fctx.changectx() | |
818 | basectx = ctx.p1() |
|
821 | basectx = ctx.p1() | |
819 |
|
822 | |||
820 | style = web.config('web', 'style') |
|
823 | style = web.config('web', 'style') | |
821 | if 'style' in req.req.qsparams: |
|
824 | if 'style' in req.req.qsparams: | |
822 | style = req.req.qsparams['style'] |
|
825 | style = req.req.qsparams['style'] | |
823 |
|
826 | |||
824 | diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style) |
|
827 | diffs = webutil.diffs(web, tmpl, ctx, basectx, [path], style) | |
825 | if fctx is not None: |
|
828 | if fctx is not None: | |
826 | rename = webutil.renamelink(fctx) |
|
829 | rename = webutil.renamelink(fctx) | |
827 | ctx = fctx |
|
830 | ctx = fctx | |
828 | else: |
|
831 | else: | |
829 | rename = [] |
|
832 | rename = [] | |
830 | ctx = ctx |
|
833 | ctx = ctx | |
831 |
|
834 | |||
832 | web.res.setbodygen(tmpl( |
|
835 | web.res.setbodygen(tmpl( | |
833 | 'filediff', |
|
836 | 'filediff', | |
834 | file=path, |
|
837 | file=path, | |
835 | symrev=webutil.symrevorshortnode(req, ctx), |
|
838 | symrev=webutil.symrevorshortnode(req, ctx), | |
836 | rename=rename, |
|
839 | rename=rename, | |
837 | diff=diffs, |
|
840 | diff=diffs, | |
838 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) |
|
841 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) | |
839 |
|
842 | |||
840 | return web.res |
|
843 | return web.res | |
841 |
|
844 | |||
842 | diff = webcommand('diff')(filediff) |
|
845 | diff = webcommand('diff')(filediff) | |
843 |
|
846 | |||
844 | @webcommand('comparison') |
|
847 | @webcommand('comparison') | |
845 | def comparison(web, req, tmpl): |
|
848 | def comparison(web, req, tmpl): | |
846 | """ |
|
849 | """ | |
847 | /comparison/{revision}/{path} |
|
850 | /comparison/{revision}/{path} | |
848 | ----------------------------- |
|
851 | ----------------------------- | |
849 |
|
852 | |||
850 | Show a comparison between the old and new versions of a file from changes |
|
853 | Show a comparison between the old and new versions of a file from changes | |
851 | made on a particular revision. |
|
854 | made on a particular revision. | |
852 |
|
855 | |||
853 | This is similar to the ``diff`` handler. However, this form features |
|
856 | This is similar to the ``diff`` handler. However, this form features | |
854 | a split or side-by-side diff rather than a unified diff. |
|
857 | a split or side-by-side diff rather than a unified diff. | |
855 |
|
858 | |||
856 | The ``context`` query string argument can be used to control the lines of |
|
859 | The ``context`` query string argument can be used to control the lines of | |
857 | context in the diff. |
|
860 | context in the diff. | |
858 |
|
861 | |||
859 | The ``filecomparison`` template is rendered. |
|
862 | The ``filecomparison`` template is rendered. | |
860 | """ |
|
863 | """ | |
861 | ctx = webutil.changectx(web.repo, req) |
|
864 | ctx = webutil.changectx(web.repo, req) | |
862 | if 'file' not in req.req.qsparams: |
|
865 | if 'file' not in req.req.qsparams: | |
863 | raise ErrorResponse(HTTP_NOT_FOUND, 'file not given') |
|
866 | raise ErrorResponse(HTTP_NOT_FOUND, 'file not given') | |
864 | path = webutil.cleanpath(web.repo, req.req.qsparams['file']) |
|
867 | path = webutil.cleanpath(web.repo, req.req.qsparams['file']) | |
865 |
|
868 | |||
866 | parsecontext = lambda v: v == 'full' and -1 or int(v) |
|
869 | parsecontext = lambda v: v == 'full' and -1 or int(v) | |
867 | if 'context' in req.req.qsparams: |
|
870 | if 'context' in req.req.qsparams: | |
868 | context = parsecontext(req.req.qsparams['context']) |
|
871 | context = parsecontext(req.req.qsparams['context']) | |
869 | else: |
|
872 | else: | |
870 | context = parsecontext(web.config('web', 'comparisoncontext', '5')) |
|
873 | context = parsecontext(web.config('web', 'comparisoncontext', '5')) | |
871 |
|
874 | |||
872 | def filelines(f): |
|
875 | def filelines(f): | |
873 | if f.isbinary(): |
|
876 | if f.isbinary(): | |
874 | mt = mimetypes.guess_type(f.path())[0] |
|
877 | mt = mimetypes.guess_type(f.path())[0] | |
875 | if not mt: |
|
878 | if not mt: | |
876 | mt = 'application/octet-stream' |
|
879 | mt = 'application/octet-stream' | |
877 | return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))] |
|
880 | return [_('(binary file %s, hash: %s)') % (mt, hex(f.filenode()))] | |
878 | return f.data().splitlines() |
|
881 | return f.data().splitlines() | |
879 |
|
882 | |||
880 | fctx = None |
|
883 | fctx = None | |
881 | parent = ctx.p1() |
|
884 | parent = ctx.p1() | |
882 | leftrev = parent.rev() |
|
885 | leftrev = parent.rev() | |
883 | leftnode = parent.node() |
|
886 | leftnode = parent.node() | |
884 | rightrev = ctx.rev() |
|
887 | rightrev = ctx.rev() | |
885 | rightnode = ctx.node() |
|
888 | rightnode = ctx.node() | |
886 | if path in ctx: |
|
889 | if path in ctx: | |
887 | fctx = ctx[path] |
|
890 | fctx = ctx[path] | |
888 | rightlines = filelines(fctx) |
|
891 | rightlines = filelines(fctx) | |
889 | if path not in parent: |
|
892 | if path not in parent: | |
890 | leftlines = () |
|
893 | leftlines = () | |
891 | else: |
|
894 | else: | |
892 | pfctx = parent[path] |
|
895 | pfctx = parent[path] | |
893 | leftlines = filelines(pfctx) |
|
896 | leftlines = filelines(pfctx) | |
894 | else: |
|
897 | else: | |
895 | rightlines = () |
|
898 | rightlines = () | |
896 | pfctx = ctx.parents()[0][path] |
|
899 | pfctx = ctx.parents()[0][path] | |
897 | leftlines = filelines(pfctx) |
|
900 | leftlines = filelines(pfctx) | |
898 |
|
901 | |||
899 | comparison = webutil.compare(tmpl, context, leftlines, rightlines) |
|
902 | comparison = webutil.compare(tmpl, context, leftlines, rightlines) | |
900 | if fctx is not None: |
|
903 | if fctx is not None: | |
901 | rename = webutil.renamelink(fctx) |
|
904 | rename = webutil.renamelink(fctx) | |
902 | ctx = fctx |
|
905 | ctx = fctx | |
903 | else: |
|
906 | else: | |
904 | rename = [] |
|
907 | rename = [] | |
905 | ctx = ctx |
|
908 | ctx = ctx | |
906 |
|
909 | |||
907 | web.res.setbodygen(tmpl( |
|
910 | web.res.setbodygen(tmpl( | |
908 | 'filecomparison', |
|
911 | 'filecomparison', | |
909 | file=path, |
|
912 | file=path, | |
910 | symrev=webutil.symrevorshortnode(req, ctx), |
|
913 | symrev=webutil.symrevorshortnode(req, ctx), | |
911 | rename=rename, |
|
914 | rename=rename, | |
912 | leftrev=leftrev, |
|
915 | leftrev=leftrev, | |
913 | leftnode=hex(leftnode), |
|
916 | leftnode=hex(leftnode), | |
914 | rightrev=rightrev, |
|
917 | rightrev=rightrev, | |
915 | rightnode=hex(rightnode), |
|
918 | rightnode=hex(rightnode), | |
916 | comparison=comparison, |
|
919 | comparison=comparison, | |
917 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) |
|
920 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)))) | |
918 |
|
921 | |||
919 | return web.res |
|
922 | return web.res | |
920 |
|
923 | |||
921 | @webcommand('annotate') |
|
924 | @webcommand('annotate') | |
922 | def annotate(web, req, tmpl): |
|
925 | def annotate(web, req, tmpl): | |
923 | """ |
|
926 | """ | |
924 | /annotate/{revision}/{path} |
|
927 | /annotate/{revision}/{path} | |
925 | --------------------------- |
|
928 | --------------------------- | |
926 |
|
929 | |||
927 | Show changeset information for each line in a file. |
|
930 | Show changeset information for each line in a file. | |
928 |
|
931 | |||
929 | The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and |
|
932 | The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and | |
930 | ``ignoreblanklines`` query string arguments have the same meaning as |
|
933 | ``ignoreblanklines`` query string arguments have the same meaning as | |
931 | their ``[annotate]`` config equivalents. It uses the hgrc boolean |
|
934 | their ``[annotate]`` config equivalents. It uses the hgrc boolean | |
932 | parsing logic to interpret the value. e.g. ``0`` and ``false`` are |
|
935 | parsing logic to interpret the value. e.g. ``0`` and ``false`` are | |
933 | false and ``1`` and ``true`` are true. If not defined, the server |
|
936 | false and ``1`` and ``true`` are true. If not defined, the server | |
934 | default settings are used. |
|
937 | default settings are used. | |
935 |
|
938 | |||
936 | The ``fileannotate`` template is rendered. |
|
939 | The ``fileannotate`` template is rendered. | |
937 | """ |
|
940 | """ | |
938 | fctx = webutil.filectx(web.repo, req) |
|
941 | fctx = webutil.filectx(web.repo, req) | |
939 | f = fctx.path() |
|
942 | f = fctx.path() | |
940 | parity = paritygen(web.stripecount) |
|
943 | parity = paritygen(web.stripecount) | |
941 | ishead = fctx.filerev() in fctx.filelog().headrevs() |
|
944 | ishead = fctx.filerev() in fctx.filelog().headrevs() | |
942 |
|
945 | |||
943 | # parents() is called once per line and several lines likely belong to |
|
946 | # parents() is called once per line and several lines likely belong to | |
944 | # same revision. So it is worth caching. |
|
947 | # same revision. So it is worth caching. | |
945 | # TODO there are still redundant operations within basefilectx.parents() |
|
948 | # TODO there are still redundant operations within basefilectx.parents() | |
946 | # and from the fctx.annotate() call itself that could be cached. |
|
949 | # and from the fctx.annotate() call itself that could be cached. | |
947 | parentscache = {} |
|
950 | parentscache = {} | |
948 | def parents(f): |
|
951 | def parents(f): | |
949 | rev = f.rev() |
|
952 | rev = f.rev() | |
950 | if rev not in parentscache: |
|
953 | if rev not in parentscache: | |
951 | parentscache[rev] = [] |
|
954 | parentscache[rev] = [] | |
952 | for p in f.parents(): |
|
955 | for p in f.parents(): | |
953 | entry = { |
|
956 | entry = { | |
954 | 'node': p.hex(), |
|
957 | 'node': p.hex(), | |
955 | 'rev': p.rev(), |
|
958 | 'rev': p.rev(), | |
956 | } |
|
959 | } | |
957 | parentscache[rev].append(entry) |
|
960 | parentscache[rev].append(entry) | |
958 |
|
961 | |||
959 | for p in parentscache[rev]: |
|
962 | for p in parentscache[rev]: | |
960 | yield p |
|
963 | yield p | |
961 |
|
964 | |||
962 | def annotate(**map): |
|
965 | def annotate(**map): | |
963 | if fctx.isbinary(): |
|
966 | if fctx.isbinary(): | |
964 | mt = (mimetypes.guess_type(fctx.path())[0] |
|
967 | mt = (mimetypes.guess_type(fctx.path())[0] | |
965 | or 'application/octet-stream') |
|
968 | or 'application/octet-stream') | |
966 | lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)] |
|
969 | lines = [((fctx.filectx(fctx.filerev()), 1), '(binary:%s)' % mt)] | |
967 | else: |
|
970 | else: | |
968 | lines = webutil.annotate(req, fctx, web.repo.ui) |
|
971 | lines = webutil.annotate(req, fctx, web.repo.ui) | |
969 |
|
972 | |||
970 | previousrev = None |
|
973 | previousrev = None | |
971 | blockparitygen = paritygen(1) |
|
974 | blockparitygen = paritygen(1) | |
972 | for lineno, (aline, l) in enumerate(lines): |
|
975 | for lineno, (aline, l) in enumerate(lines): | |
973 | f = aline.fctx |
|
976 | f = aline.fctx | |
974 | rev = f.rev() |
|
977 | rev = f.rev() | |
975 | if rev != previousrev: |
|
978 | if rev != previousrev: | |
976 | blockhead = True |
|
979 | blockhead = True | |
977 | blockparity = next(blockparitygen) |
|
980 | blockparity = next(blockparitygen) | |
978 | else: |
|
981 | else: | |
979 | blockhead = None |
|
982 | blockhead = None | |
980 | previousrev = rev |
|
983 | previousrev = rev | |
981 | yield {"parity": next(parity), |
|
984 | yield {"parity": next(parity), | |
982 | "node": f.hex(), |
|
985 | "node": f.hex(), | |
983 | "rev": rev, |
|
986 | "rev": rev, | |
984 | "author": f.user(), |
|
987 | "author": f.user(), | |
985 | "parents": parents(f), |
|
988 | "parents": parents(f), | |
986 | "desc": f.description(), |
|
989 | "desc": f.description(), | |
987 | "extra": f.extra(), |
|
990 | "extra": f.extra(), | |
988 | "file": f.path(), |
|
991 | "file": f.path(), | |
989 | "blockhead": blockhead, |
|
992 | "blockhead": blockhead, | |
990 | "blockparity": blockparity, |
|
993 | "blockparity": blockparity, | |
991 | "targetline": aline.lineno, |
|
994 | "targetline": aline.lineno, | |
992 | "line": l, |
|
995 | "line": l, | |
993 | "lineno": lineno + 1, |
|
996 | "lineno": lineno + 1, | |
994 | "lineid": "l%d" % (lineno + 1), |
|
997 | "lineid": "l%d" % (lineno + 1), | |
995 | "linenumber": "% 6d" % (lineno + 1), |
|
998 | "linenumber": "% 6d" % (lineno + 1), | |
996 | "revdate": f.date()} |
|
999 | "revdate": f.date()} | |
997 |
|
1000 | |||
998 | diffopts = webutil.difffeatureopts(req, web.repo.ui, 'annotate') |
|
1001 | diffopts = webutil.difffeatureopts(req, web.repo.ui, 'annotate') | |
999 | diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults} |
|
1002 | diffopts = {k: getattr(diffopts, k) for k in diffopts.defaults} | |
1000 |
|
1003 | |||
1001 | web.res.setbodygen(tmpl( |
|
1004 | web.res.setbodygen(tmpl( | |
1002 | 'fileannotate', |
|
1005 | 'fileannotate', | |
1003 | file=f, |
|
1006 | file=f, | |
1004 | annotate=annotate, |
|
1007 | annotate=annotate, | |
1005 | path=webutil.up(f), |
|
1008 | path=webutil.up(f), | |
1006 | symrev=webutil.symrevorshortnode(req, fctx), |
|
1009 | symrev=webutil.symrevorshortnode(req, fctx), | |
1007 | rename=webutil.renamelink(fctx), |
|
1010 | rename=webutil.renamelink(fctx), | |
1008 | permissions=fctx.manifest().flags(f), |
|
1011 | permissions=fctx.manifest().flags(f), | |
1009 | ishead=int(ishead), |
|
1012 | ishead=int(ishead), | |
1010 | diffopts=diffopts, |
|
1013 | diffopts=diffopts, | |
1011 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) |
|
1014 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) | |
1012 |
|
1015 | |||
1013 | return web.res |
|
1016 | return web.res | |
1014 |
|
1017 | |||
1015 | @webcommand('filelog') |
|
1018 | @webcommand('filelog') | |
1016 | def filelog(web, req, tmpl): |
|
1019 | def filelog(web, req, tmpl): | |
1017 | """ |
|
1020 | """ | |
1018 | /filelog/{revision}/{path} |
|
1021 | /filelog/{revision}/{path} | |
1019 | -------------------------- |
|
1022 | -------------------------- | |
1020 |
|
1023 | |||
1021 | Show information about the history of a file in the repository. |
|
1024 | Show information about the history of a file in the repository. | |
1022 |
|
1025 | |||
1023 | The ``revcount`` query string argument can be defined to control the |
|
1026 | The ``revcount`` query string argument can be defined to control the | |
1024 | maximum number of entries to show. |
|
1027 | maximum number of entries to show. | |
1025 |
|
1028 | |||
1026 | The ``filelog`` template will be rendered. |
|
1029 | The ``filelog`` template will be rendered. | |
1027 | """ |
|
1030 | """ | |
1028 |
|
1031 | |||
1029 | try: |
|
1032 | try: | |
1030 | fctx = webutil.filectx(web.repo, req) |
|
1033 | fctx = webutil.filectx(web.repo, req) | |
1031 | f = fctx.path() |
|
1034 | f = fctx.path() | |
1032 | fl = fctx.filelog() |
|
1035 | fl = fctx.filelog() | |
1033 | except error.LookupError: |
|
1036 | except error.LookupError: | |
1034 | f = webutil.cleanpath(web.repo, req.req.qsparams['file']) |
|
1037 | f = webutil.cleanpath(web.repo, req.req.qsparams['file']) | |
1035 | fl = web.repo.file(f) |
|
1038 | fl = web.repo.file(f) | |
1036 | numrevs = len(fl) |
|
1039 | numrevs = len(fl) | |
1037 | if not numrevs: # file doesn't exist at all |
|
1040 | if not numrevs: # file doesn't exist at all | |
1038 | raise |
|
1041 | raise | |
1039 | rev = webutil.changectx(web.repo, req).rev() |
|
1042 | rev = webutil.changectx(web.repo, req).rev() | |
1040 | first = fl.linkrev(0) |
|
1043 | first = fl.linkrev(0) | |
1041 | if rev < first: # current rev is from before file existed |
|
1044 | if rev < first: # current rev is from before file existed | |
1042 | raise |
|
1045 | raise | |
1043 | frev = numrevs - 1 |
|
1046 | frev = numrevs - 1 | |
1044 | while fl.linkrev(frev) > rev: |
|
1047 | while fl.linkrev(frev) > rev: | |
1045 | frev -= 1 |
|
1048 | frev -= 1 | |
1046 | fctx = web.repo.filectx(f, fl.linkrev(frev)) |
|
1049 | fctx = web.repo.filectx(f, fl.linkrev(frev)) | |
1047 |
|
1050 | |||
1048 | revcount = web.maxshortchanges |
|
1051 | revcount = web.maxshortchanges | |
1049 | if 'revcount' in req.req.qsparams: |
|
1052 | if 'revcount' in req.req.qsparams: | |
1050 | try: |
|
1053 | try: | |
1051 | revcount = int(req.req.qsparams.get('revcount', revcount)) |
|
1054 | revcount = int(req.req.qsparams.get('revcount', revcount)) | |
1052 | revcount = max(revcount, 1) |
|
1055 | revcount = max(revcount, 1) | |
1053 | tmpl.defaults['sessionvars']['revcount'] = revcount |
|
1056 | tmpl.defaults['sessionvars']['revcount'] = revcount | |
1054 | except ValueError: |
|
1057 | except ValueError: | |
1055 | pass |
|
1058 | pass | |
1056 |
|
1059 | |||
1057 | lrange = webutil.linerange(req) |
|
1060 | lrange = webutil.linerange(req) | |
1058 |
|
1061 | |||
1059 | lessvars = copy.copy(tmpl.defaults['sessionvars']) |
|
1062 | lessvars = copy.copy(tmpl.defaults['sessionvars']) | |
1060 | lessvars['revcount'] = max(revcount // 2, 1) |
|
1063 | lessvars['revcount'] = max(revcount // 2, 1) | |
1061 | morevars = copy.copy(tmpl.defaults['sessionvars']) |
|
1064 | morevars = copy.copy(tmpl.defaults['sessionvars']) | |
1062 | morevars['revcount'] = revcount * 2 |
|
1065 | morevars['revcount'] = revcount * 2 | |
1063 |
|
1066 | |||
1064 | patch = 'patch' in req.req.qsparams |
|
1067 | patch = 'patch' in req.req.qsparams | |
1065 | if patch: |
|
1068 | if patch: | |
1066 | lessvars['patch'] = morevars['patch'] = req.req.qsparams['patch'] |
|
1069 | lessvars['patch'] = morevars['patch'] = req.req.qsparams['patch'] | |
1067 | descend = 'descend' in req.req.qsparams |
|
1070 | descend = 'descend' in req.req.qsparams | |
1068 | if descend: |
|
1071 | if descend: | |
1069 | lessvars['descend'] = morevars['descend'] = req.req.qsparams['descend'] |
|
1072 | lessvars['descend'] = morevars['descend'] = req.req.qsparams['descend'] | |
1070 |
|
1073 | |||
1071 | count = fctx.filerev() + 1 |
|
1074 | count = fctx.filerev() + 1 | |
1072 | start = max(0, count - revcount) # first rev on this page |
|
1075 | start = max(0, count - revcount) # first rev on this page | |
1073 | end = min(count, start + revcount) # last rev on this page |
|
1076 | end = min(count, start + revcount) # last rev on this page | |
1074 | parity = paritygen(web.stripecount, offset=start - end) |
|
1077 | parity = paritygen(web.stripecount, offset=start - end) | |
1075 |
|
1078 | |||
1076 | repo = web.repo |
|
1079 | repo = web.repo | |
1077 | revs = fctx.filelog().revs(start, end - 1) |
|
1080 | revs = fctx.filelog().revs(start, end - 1) | |
1078 | entries = [] |
|
1081 | entries = [] | |
1079 |
|
1082 | |||
1080 | diffstyle = web.config('web', 'style') |
|
1083 | diffstyle = web.config('web', 'style') | |
1081 | if 'style' in req.req.qsparams: |
|
1084 | if 'style' in req.req.qsparams: | |
1082 | diffstyle = req.req.qsparams['style'] |
|
1085 | diffstyle = req.req.qsparams['style'] | |
1083 |
|
1086 | |||
1084 | def diff(fctx, linerange=None): |
|
1087 | def diff(fctx, linerange=None): | |
1085 | ctx = fctx.changectx() |
|
1088 | ctx = fctx.changectx() | |
1086 | basectx = ctx.p1() |
|
1089 | basectx = ctx.p1() | |
1087 | path = fctx.path() |
|
1090 | path = fctx.path() | |
1088 | return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle, |
|
1091 | return webutil.diffs(web, tmpl, ctx, basectx, [path], diffstyle, | |
1089 | linerange=linerange, |
|
1092 | linerange=linerange, | |
1090 | lineidprefix='%s-' % ctx.hex()[:12]) |
|
1093 | lineidprefix='%s-' % ctx.hex()[:12]) | |
1091 |
|
1094 | |||
1092 | linerange = None |
|
1095 | linerange = None | |
1093 | if lrange is not None: |
|
1096 | if lrange is not None: | |
1094 | linerange = webutil.formatlinerange(*lrange) |
|
1097 | linerange = webutil.formatlinerange(*lrange) | |
1095 | # deactivate numeric nav links when linerange is specified as this |
|
1098 | # deactivate numeric nav links when linerange is specified as this | |
1096 | # would required a dedicated "revnav" class |
|
1099 | # would required a dedicated "revnav" class | |
1097 | nav = None |
|
1100 | nav = None | |
1098 | if descend: |
|
1101 | if descend: | |
1099 | it = dagop.blockdescendants(fctx, *lrange) |
|
1102 | it = dagop.blockdescendants(fctx, *lrange) | |
1100 | else: |
|
1103 | else: | |
1101 | it = dagop.blockancestors(fctx, *lrange) |
|
1104 | it = dagop.blockancestors(fctx, *lrange) | |
1102 | for i, (c, lr) in enumerate(it, 1): |
|
1105 | for i, (c, lr) in enumerate(it, 1): | |
1103 | diffs = None |
|
1106 | diffs = None | |
1104 | if patch: |
|
1107 | if patch: | |
1105 | diffs = diff(c, linerange=lr) |
|
1108 | diffs = diff(c, linerange=lr) | |
1106 | # follow renames accross filtered (not in range) revisions |
|
1109 | # follow renames accross filtered (not in range) revisions | |
1107 | path = c.path() |
|
1110 | path = c.path() | |
1108 | entries.append(dict( |
|
1111 | entries.append(dict( | |
1109 | parity=next(parity), |
|
1112 | parity=next(parity), | |
1110 | filerev=c.rev(), |
|
1113 | filerev=c.rev(), | |
1111 | file=path, |
|
1114 | file=path, | |
1112 | diff=diffs, |
|
1115 | diff=diffs, | |
1113 | linerange=webutil.formatlinerange(*lr), |
|
1116 | linerange=webutil.formatlinerange(*lr), | |
1114 | **pycompat.strkwargs(webutil.commonentry(repo, c)))) |
|
1117 | **pycompat.strkwargs(webutil.commonentry(repo, c)))) | |
1115 | if i == revcount: |
|
1118 | if i == revcount: | |
1116 | break |
|
1119 | break | |
1117 | lessvars['linerange'] = webutil.formatlinerange(*lrange) |
|
1120 | lessvars['linerange'] = webutil.formatlinerange(*lrange) | |
1118 | morevars['linerange'] = lessvars['linerange'] |
|
1121 | morevars['linerange'] = lessvars['linerange'] | |
1119 | else: |
|
1122 | else: | |
1120 | for i in revs: |
|
1123 | for i in revs: | |
1121 | iterfctx = fctx.filectx(i) |
|
1124 | iterfctx = fctx.filectx(i) | |
1122 | diffs = None |
|
1125 | diffs = None | |
1123 | if patch: |
|
1126 | if patch: | |
1124 | diffs = diff(iterfctx) |
|
1127 | diffs = diff(iterfctx) | |
1125 | entries.append(dict( |
|
1128 | entries.append(dict( | |
1126 | parity=next(parity), |
|
1129 | parity=next(parity), | |
1127 | filerev=i, |
|
1130 | filerev=i, | |
1128 | file=f, |
|
1131 | file=f, | |
1129 | diff=diffs, |
|
1132 | diff=diffs, | |
1130 | rename=webutil.renamelink(iterfctx), |
|
1133 | rename=webutil.renamelink(iterfctx), | |
1131 | **pycompat.strkwargs(webutil.commonentry(repo, iterfctx)))) |
|
1134 | **pycompat.strkwargs(webutil.commonentry(repo, iterfctx)))) | |
1132 | entries.reverse() |
|
1135 | entries.reverse() | |
1133 | revnav = webutil.filerevnav(web.repo, fctx.path()) |
|
1136 | revnav = webutil.filerevnav(web.repo, fctx.path()) | |
1134 | nav = revnav.gen(end - 1, revcount, count) |
|
1137 | nav = revnav.gen(end - 1, revcount, count) | |
1135 |
|
1138 | |||
1136 | latestentry = entries[:1] |
|
1139 | latestentry = entries[:1] | |
1137 |
|
1140 | |||
1138 | web.res.setbodygen(tmpl( |
|
1141 | web.res.setbodygen(tmpl( | |
1139 | 'filelog', |
|
1142 | 'filelog', | |
1140 | file=f, |
|
1143 | file=f, | |
1141 | nav=nav, |
|
1144 | nav=nav, | |
1142 | symrev=webutil.symrevorshortnode(req, fctx), |
|
1145 | symrev=webutil.symrevorshortnode(req, fctx), | |
1143 | entries=entries, |
|
1146 | entries=entries, | |
1144 | descend=descend, |
|
1147 | descend=descend, | |
1145 | patch=patch, |
|
1148 | patch=patch, | |
1146 | latestentry=latestentry, |
|
1149 | latestentry=latestentry, | |
1147 | linerange=linerange, |
|
1150 | linerange=linerange, | |
1148 | revcount=revcount, |
|
1151 | revcount=revcount, | |
1149 | morevars=morevars, |
|
1152 | morevars=morevars, | |
1150 | lessvars=lessvars, |
|
1153 | lessvars=lessvars, | |
1151 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) |
|
1154 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)))) | |
1152 |
|
1155 | |||
1153 | return web.res |
|
1156 | return web.res | |
1154 |
|
1157 | |||
1155 | @webcommand('archive') |
|
1158 | @webcommand('archive') | |
1156 | def archive(web, req, tmpl): |
|
1159 | def archive(web, req, tmpl): | |
1157 | """ |
|
1160 | """ | |
1158 | /archive/{revision}.{format}[/{path}] |
|
1161 | /archive/{revision}.{format}[/{path}] | |
1159 | ------------------------------------- |
|
1162 | ------------------------------------- | |
1160 |
|
1163 | |||
1161 | Obtain an archive of repository content. |
|
1164 | Obtain an archive of repository content. | |
1162 |
|
1165 | |||
1163 | The content and type of the archive is defined by a URL path parameter. |
|
1166 | The content and type of the archive is defined by a URL path parameter. | |
1164 | ``format`` is the file extension of the archive type to be generated. e.g. |
|
1167 | ``format`` is the file extension of the archive type to be generated. e.g. | |
1165 | ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your |
|
1168 | ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your | |
1166 | server configuration. |
|
1169 | server configuration. | |
1167 |
|
1170 | |||
1168 | The optional ``path`` URL parameter controls content to include in the |
|
1171 | The optional ``path`` URL parameter controls content to include in the | |
1169 | archive. If omitted, every file in the specified revision is present in the |
|
1172 | archive. If omitted, every file in the specified revision is present in the | |
1170 | archive. If included, only the specified file or contents of the specified |
|
1173 | archive. If included, only the specified file or contents of the specified | |
1171 | directory will be included in the archive. |
|
1174 | directory will be included in the archive. | |
1172 |
|
1175 | |||
1173 | No template is used for this handler. Raw, binary content is generated. |
|
1176 | No template is used for this handler. Raw, binary content is generated. | |
1174 | """ |
|
1177 | """ | |
1175 |
|
1178 | |||
1176 | type_ = req.req.qsparams.get('type') |
|
1179 | type_ = req.req.qsparams.get('type') | |
1177 | allowed = web.configlist("web", "allow_archive") |
|
1180 | allowed = web.configlist("web", "allow_archive") | |
1178 | key = req.req.qsparams['node'] |
|
1181 | key = req.req.qsparams['node'] | |
1179 |
|
1182 | |||
1180 | if type_ not in web.archivespecs: |
|
1183 | if type_ not in web.archivespecs: | |
1181 | msg = 'Unsupported archive type: %s' % type_ |
|
1184 | msg = 'Unsupported archive type: %s' % type_ | |
1182 | raise ErrorResponse(HTTP_NOT_FOUND, msg) |
|
1185 | raise ErrorResponse(HTTP_NOT_FOUND, msg) | |
1183 |
|
1186 | |||
1184 | if not ((type_ in allowed or |
|
1187 | if not ((type_ in allowed or | |
1185 | web.configbool("web", "allow" + type_))): |
|
1188 | web.configbool("web", "allow" + type_))): | |
1186 | msg = 'Archive type not allowed: %s' % type_ |
|
1189 | msg = 'Archive type not allowed: %s' % type_ | |
1187 | raise ErrorResponse(HTTP_FORBIDDEN, msg) |
|
1190 | raise ErrorResponse(HTTP_FORBIDDEN, msg) | |
1188 |
|
1191 | |||
1189 | reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame)) |
|
1192 | reponame = re.sub(br"\W+", "-", os.path.basename(web.reponame)) | |
1190 | cnode = web.repo.lookup(key) |
|
1193 | cnode = web.repo.lookup(key) | |
1191 | arch_version = key |
|
1194 | arch_version = key | |
1192 | if cnode == key or key == 'tip': |
|
1195 | if cnode == key or key == 'tip': | |
1193 | arch_version = short(cnode) |
|
1196 | arch_version = short(cnode) | |
1194 | name = "%s-%s" % (reponame, arch_version) |
|
1197 | name = "%s-%s" % (reponame, arch_version) | |
1195 |
|
1198 | |||
1196 | ctx = webutil.changectx(web.repo, req) |
|
1199 | ctx = webutil.changectx(web.repo, req) | |
1197 | pats = [] |
|
1200 | pats = [] | |
1198 | match = scmutil.match(ctx, []) |
|
1201 | match = scmutil.match(ctx, []) | |
1199 | file = req.req.qsparams.get('file') |
|
1202 | file = req.req.qsparams.get('file') | |
1200 | if file: |
|
1203 | if file: | |
1201 | pats = ['path:' + file] |
|
1204 | pats = ['path:' + file] | |
1202 | match = scmutil.match(ctx, pats, default='path') |
|
1205 | match = scmutil.match(ctx, pats, default='path') | |
1203 | if pats: |
|
1206 | if pats: | |
1204 | files = [f for f in ctx.manifest().keys() if match(f)] |
|
1207 | files = [f for f in ctx.manifest().keys() if match(f)] | |
1205 | if not files: |
|
1208 | if not files: | |
1206 | raise ErrorResponse(HTTP_NOT_FOUND, |
|
1209 | raise ErrorResponse(HTTP_NOT_FOUND, | |
1207 | 'file(s) not found: %s' % file) |
|
1210 | 'file(s) not found: %s' % file) | |
1208 |
|
1211 | |||
1209 | mimetype, artype, extension, encoding = web.archivespecs[type_] |
|
1212 | mimetype, artype, extension, encoding = web.archivespecs[type_] | |
1210 | headers = [ |
|
1213 | headers = [ | |
1211 | ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension)) |
|
1214 | ('Content-Disposition', 'attachment; filename=%s%s' % (name, extension)) | |
1212 | ] |
|
1215 | ] | |
1213 | if encoding: |
|
1216 | if encoding: | |
1214 | headers.append(('Content-Encoding', encoding)) |
|
1217 | headers.append(('Content-Encoding', encoding)) | |
1215 | req.headers.extend(headers) |
|
1218 | req.headers.extend(headers) | |
1216 | req.respond(HTTP_OK, mimetype) |
|
1219 | req.respond(HTTP_OK, mimetype) | |
1217 |
|
1220 | |||
1218 | archival.archive(web.repo, req, cnode, artype, prefix=name, |
|
1221 | bodyfh = requestmod.offsettrackingwriter(req.write) | |
|
1222 | ||||
|
1223 | archival.archive(web.repo, bodyfh, cnode, artype, prefix=name, | |||
1219 | matchfn=match, |
|
1224 | matchfn=match, | |
1220 | subrepos=web.configbool("web", "archivesubrepos")) |
|
1225 | subrepos=web.configbool("web", "archivesubrepos")) | |
1221 | return [] |
|
1226 | return [] | |
1222 |
|
1227 | |||
1223 |
|
1228 | |||
1224 | @webcommand('static') |
|
1229 | @webcommand('static') | |
1225 | def static(web, req, tmpl): |
|
1230 | def static(web, req, tmpl): | |
1226 | fname = req.req.qsparams['file'] |
|
1231 | fname = req.req.qsparams['file'] | |
1227 | # a repo owner may set web.static in .hg/hgrc to get any file |
|
1232 | # a repo owner may set web.static in .hg/hgrc to get any file | |
1228 | # readable by the user running the CGI script |
|
1233 | # readable by the user running the CGI script | |
1229 | static = web.config("web", "static", None, untrusted=False) |
|
1234 | static = web.config("web", "static", None, untrusted=False) | |
1230 | if not static: |
|
1235 | if not static: | |
1231 | tp = web.templatepath or templater.templatepaths() |
|
1236 | tp = web.templatepath or templater.templatepaths() | |
1232 | if isinstance(tp, str): |
|
1237 | if isinstance(tp, str): | |
1233 | tp = [tp] |
|
1238 | tp = [tp] | |
1234 | static = [os.path.join(p, 'static') for p in tp] |
|
1239 | static = [os.path.join(p, 'static') for p in tp] | |
1235 |
|
1240 | |||
1236 | staticfile(static, fname, web.res) |
|
1241 | staticfile(static, fname, web.res) | |
1237 | return web.res |
|
1242 | return web.res | |
1238 |
|
1243 | |||
1239 | @webcommand('graph') |
|
1244 | @webcommand('graph') | |
1240 | def graph(web, req, tmpl): |
|
1245 | def graph(web, req, tmpl): | |
1241 | """ |
|
1246 | """ | |
1242 | /graph[/{revision}] |
|
1247 | /graph[/{revision}] | |
1243 | ------------------- |
|
1248 | ------------------- | |
1244 |
|
1249 | |||
1245 | Show information about the graphical topology of the repository. |
|
1250 | Show information about the graphical topology of the repository. | |
1246 |
|
1251 | |||
1247 | Information rendered by this handler can be used to create visual |
|
1252 | Information rendered by this handler can be used to create visual | |
1248 | representations of repository topology. |
|
1253 | representations of repository topology. | |
1249 |
|
1254 | |||
1250 | The ``revision`` URL parameter controls the starting changeset. If it's |
|
1255 | The ``revision`` URL parameter controls the starting changeset. If it's | |
1251 | absent, the default is ``tip``. |
|
1256 | absent, the default is ``tip``. | |
1252 |
|
1257 | |||
1253 | The ``revcount`` query string argument can define the number of changesets |
|
1258 | The ``revcount`` query string argument can define the number of changesets | |
1254 | to show information for. |
|
1259 | to show information for. | |
1255 |
|
1260 | |||
1256 | The ``graphtop`` query string argument can specify the starting changeset |
|
1261 | The ``graphtop`` query string argument can specify the starting changeset | |
1257 | for producing ``jsdata`` variable that is used for rendering graph in |
|
1262 | for producing ``jsdata`` variable that is used for rendering graph in | |
1258 | JavaScript. By default it has the same value as ``revision``. |
|
1263 | JavaScript. By default it has the same value as ``revision``. | |
1259 |
|
1264 | |||
1260 | This handler will render the ``graph`` template. |
|
1265 | This handler will render the ``graph`` template. | |
1261 | """ |
|
1266 | """ | |
1262 |
|
1267 | |||
1263 | if 'node' in req.req.qsparams: |
|
1268 | if 'node' in req.req.qsparams: | |
1264 | ctx = webutil.changectx(web.repo, req) |
|
1269 | ctx = webutil.changectx(web.repo, req) | |
1265 | symrev = webutil.symrevorshortnode(req, ctx) |
|
1270 | symrev = webutil.symrevorshortnode(req, ctx) | |
1266 | else: |
|
1271 | else: | |
1267 | ctx = web.repo['tip'] |
|
1272 | ctx = web.repo['tip'] | |
1268 | symrev = 'tip' |
|
1273 | symrev = 'tip' | |
1269 | rev = ctx.rev() |
|
1274 | rev = ctx.rev() | |
1270 |
|
1275 | |||
1271 | bg_height = 39 |
|
1276 | bg_height = 39 | |
1272 | revcount = web.maxshortchanges |
|
1277 | revcount = web.maxshortchanges | |
1273 | if 'revcount' in req.req.qsparams: |
|
1278 | if 'revcount' in req.req.qsparams: | |
1274 | try: |
|
1279 | try: | |
1275 | revcount = int(req.req.qsparams.get('revcount', revcount)) |
|
1280 | revcount = int(req.req.qsparams.get('revcount', revcount)) | |
1276 | revcount = max(revcount, 1) |
|
1281 | revcount = max(revcount, 1) | |
1277 | tmpl.defaults['sessionvars']['revcount'] = revcount |
|
1282 | tmpl.defaults['sessionvars']['revcount'] = revcount | |
1278 | except ValueError: |
|
1283 | except ValueError: | |
1279 | pass |
|
1284 | pass | |
1280 |
|
1285 | |||
1281 | lessvars = copy.copy(tmpl.defaults['sessionvars']) |
|
1286 | lessvars = copy.copy(tmpl.defaults['sessionvars']) | |
1282 | lessvars['revcount'] = max(revcount // 2, 1) |
|
1287 | lessvars['revcount'] = max(revcount // 2, 1) | |
1283 | morevars = copy.copy(tmpl.defaults['sessionvars']) |
|
1288 | morevars = copy.copy(tmpl.defaults['sessionvars']) | |
1284 | morevars['revcount'] = revcount * 2 |
|
1289 | morevars['revcount'] = revcount * 2 | |
1285 |
|
1290 | |||
1286 | graphtop = req.req.qsparams.get('graphtop', ctx.hex()) |
|
1291 | graphtop = req.req.qsparams.get('graphtop', ctx.hex()) | |
1287 | graphvars = copy.copy(tmpl.defaults['sessionvars']) |
|
1292 | graphvars = copy.copy(tmpl.defaults['sessionvars']) | |
1288 | graphvars['graphtop'] = graphtop |
|
1293 | graphvars['graphtop'] = graphtop | |
1289 |
|
1294 | |||
1290 | count = len(web.repo) |
|
1295 | count = len(web.repo) | |
1291 | pos = rev |
|
1296 | pos = rev | |
1292 |
|
1297 | |||
1293 | uprev = min(max(0, count - 1), rev + revcount) |
|
1298 | uprev = min(max(0, count - 1), rev + revcount) | |
1294 | downrev = max(0, rev - revcount) |
|
1299 | downrev = max(0, rev - revcount) | |
1295 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) |
|
1300 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) | |
1296 |
|
1301 | |||
1297 | tree = [] |
|
1302 | tree = [] | |
1298 | nextentry = [] |
|
1303 | nextentry = [] | |
1299 | lastrev = 0 |
|
1304 | lastrev = 0 | |
1300 | if pos != -1: |
|
1305 | if pos != -1: | |
1301 | allrevs = web.repo.changelog.revs(pos, 0) |
|
1306 | allrevs = web.repo.changelog.revs(pos, 0) | |
1302 | revs = [] |
|
1307 | revs = [] | |
1303 | for i in allrevs: |
|
1308 | for i in allrevs: | |
1304 | revs.append(i) |
|
1309 | revs.append(i) | |
1305 | if len(revs) >= revcount + 1: |
|
1310 | if len(revs) >= revcount + 1: | |
1306 | break |
|
1311 | break | |
1307 |
|
1312 | |||
1308 | if len(revs) > revcount: |
|
1313 | if len(revs) > revcount: | |
1309 | nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])] |
|
1314 | nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])] | |
1310 | revs = revs[:-1] |
|
1315 | revs = revs[:-1] | |
1311 |
|
1316 | |||
1312 | lastrev = revs[-1] |
|
1317 | lastrev = revs[-1] | |
1313 |
|
1318 | |||
1314 | # We have to feed a baseset to dagwalker as it is expecting smartset |
|
1319 | # We have to feed a baseset to dagwalker as it is expecting smartset | |
1315 | # object. This does not have a big impact on hgweb performance itself |
|
1320 | # object. This does not have a big impact on hgweb performance itself | |
1316 | # since hgweb graphing code is not itself lazy yet. |
|
1321 | # since hgweb graphing code is not itself lazy yet. | |
1317 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) |
|
1322 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) | |
1318 | # As we said one line above... not lazy. |
|
1323 | # As we said one line above... not lazy. | |
1319 | tree = list(item for item in graphmod.colored(dag, web.repo) |
|
1324 | tree = list(item for item in graphmod.colored(dag, web.repo) | |
1320 | if item[1] == graphmod.CHANGESET) |
|
1325 | if item[1] == graphmod.CHANGESET) | |
1321 |
|
1326 | |||
1322 | def nodecurrent(ctx): |
|
1327 | def nodecurrent(ctx): | |
1323 | wpnodes = web.repo.dirstate.parents() |
|
1328 | wpnodes = web.repo.dirstate.parents() | |
1324 | if wpnodes[1] == nullid: |
|
1329 | if wpnodes[1] == nullid: | |
1325 | wpnodes = wpnodes[:1] |
|
1330 | wpnodes = wpnodes[:1] | |
1326 | if ctx.node() in wpnodes: |
|
1331 | if ctx.node() in wpnodes: | |
1327 | return '@' |
|
1332 | return '@' | |
1328 | return '' |
|
1333 | return '' | |
1329 |
|
1334 | |||
1330 | def nodesymbol(ctx): |
|
1335 | def nodesymbol(ctx): | |
1331 | if ctx.obsolete(): |
|
1336 | if ctx.obsolete(): | |
1332 | return 'x' |
|
1337 | return 'x' | |
1333 | elif ctx.isunstable(): |
|
1338 | elif ctx.isunstable(): | |
1334 | return '*' |
|
1339 | return '*' | |
1335 | elif ctx.closesbranch(): |
|
1340 | elif ctx.closesbranch(): | |
1336 | return '_' |
|
1341 | return '_' | |
1337 | else: |
|
1342 | else: | |
1338 | return 'o' |
|
1343 | return 'o' | |
1339 |
|
1344 | |||
1340 | def fulltree(): |
|
1345 | def fulltree(): | |
1341 | pos = web.repo[graphtop].rev() |
|
1346 | pos = web.repo[graphtop].rev() | |
1342 | tree = [] |
|
1347 | tree = [] | |
1343 | if pos != -1: |
|
1348 | if pos != -1: | |
1344 | revs = web.repo.changelog.revs(pos, lastrev) |
|
1349 | revs = web.repo.changelog.revs(pos, lastrev) | |
1345 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) |
|
1350 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) | |
1346 | tree = list(item for item in graphmod.colored(dag, web.repo) |
|
1351 | tree = list(item for item in graphmod.colored(dag, web.repo) | |
1347 | if item[1] == graphmod.CHANGESET) |
|
1352 | if item[1] == graphmod.CHANGESET) | |
1348 | return tree |
|
1353 | return tree | |
1349 |
|
1354 | |||
1350 | def jsdata(): |
|
1355 | def jsdata(): | |
1351 | return [{'node': pycompat.bytestr(ctx), |
|
1356 | return [{'node': pycompat.bytestr(ctx), | |
1352 | 'graphnode': nodecurrent(ctx) + nodesymbol(ctx), |
|
1357 | 'graphnode': nodecurrent(ctx) + nodesymbol(ctx), | |
1353 | 'vertex': vtx, |
|
1358 | 'vertex': vtx, | |
1354 | 'edges': edges} |
|
1359 | 'edges': edges} | |
1355 | for (id, type, ctx, vtx, edges) in fulltree()] |
|
1360 | for (id, type, ctx, vtx, edges) in fulltree()] | |
1356 |
|
1361 | |||
1357 | def nodes(): |
|
1362 | def nodes(): | |
1358 | parity = paritygen(web.stripecount) |
|
1363 | parity = paritygen(web.stripecount) | |
1359 | for row, (id, type, ctx, vtx, edges) in enumerate(tree): |
|
1364 | for row, (id, type, ctx, vtx, edges) in enumerate(tree): | |
1360 | entry = webutil.commonentry(web.repo, ctx) |
|
1365 | entry = webutil.commonentry(web.repo, ctx) | |
1361 | edgedata = [{'col': edge[0], |
|
1366 | edgedata = [{'col': edge[0], | |
1362 | 'nextcol': edge[1], |
|
1367 | 'nextcol': edge[1], | |
1363 | 'color': (edge[2] - 1) % 6 + 1, |
|
1368 | 'color': (edge[2] - 1) % 6 + 1, | |
1364 | 'width': edge[3], |
|
1369 | 'width': edge[3], | |
1365 | 'bcolor': edge[4]} |
|
1370 | 'bcolor': edge[4]} | |
1366 | for edge in edges] |
|
1371 | for edge in edges] | |
1367 |
|
1372 | |||
1368 | entry.update({'col': vtx[0], |
|
1373 | entry.update({'col': vtx[0], | |
1369 | 'color': (vtx[1] - 1) % 6 + 1, |
|
1374 | 'color': (vtx[1] - 1) % 6 + 1, | |
1370 | 'parity': next(parity), |
|
1375 | 'parity': next(parity), | |
1371 | 'edges': edgedata, |
|
1376 | 'edges': edgedata, | |
1372 | 'row': row, |
|
1377 | 'row': row, | |
1373 | 'nextrow': row + 1}) |
|
1378 | 'nextrow': row + 1}) | |
1374 |
|
1379 | |||
1375 | yield entry |
|
1380 | yield entry | |
1376 |
|
1381 | |||
1377 | rows = len(tree) |
|
1382 | rows = len(tree) | |
1378 |
|
1383 | |||
1379 | web.res.setbodygen(tmpl( |
|
1384 | web.res.setbodygen(tmpl( | |
1380 | 'graph', |
|
1385 | 'graph', | |
1381 | rev=rev, |
|
1386 | rev=rev, | |
1382 | symrev=symrev, |
|
1387 | symrev=symrev, | |
1383 | revcount=revcount, |
|
1388 | revcount=revcount, | |
1384 | uprev=uprev, |
|
1389 | uprev=uprev, | |
1385 | lessvars=lessvars, |
|
1390 | lessvars=lessvars, | |
1386 | morevars=morevars, |
|
1391 | morevars=morevars, | |
1387 | downrev=downrev, |
|
1392 | downrev=downrev, | |
1388 | graphvars=graphvars, |
|
1393 | graphvars=graphvars, | |
1389 | rows=rows, |
|
1394 | rows=rows, | |
1390 | bg_height=bg_height, |
|
1395 | bg_height=bg_height, | |
1391 | changesets=count, |
|
1396 | changesets=count, | |
1392 | nextentry=nextentry, |
|
1397 | nextentry=nextentry, | |
1393 | jsdata=lambda **x: jsdata(), |
|
1398 | jsdata=lambda **x: jsdata(), | |
1394 | nodes=lambda **x: nodes(), |
|
1399 | nodes=lambda **x: nodes(), | |
1395 | node=ctx.hex(), |
|
1400 | node=ctx.hex(), | |
1396 | changenav=changenav)) |
|
1401 | changenav=changenav)) | |
1397 |
|
1402 | |||
1398 | return web.res |
|
1403 | return web.res | |
1399 |
|
1404 | |||
1400 | def _getdoc(e): |
|
1405 | def _getdoc(e): | |
1401 | doc = e[0].__doc__ |
|
1406 | doc = e[0].__doc__ | |
1402 | if doc: |
|
1407 | if doc: | |
1403 | doc = _(doc).partition('\n')[0] |
|
1408 | doc = _(doc).partition('\n')[0] | |
1404 | else: |
|
1409 | else: | |
1405 | doc = _('(no help text available)') |
|
1410 | doc = _('(no help text available)') | |
1406 | return doc |
|
1411 | return doc | |
1407 |
|
1412 | |||
1408 | @webcommand('help') |
|
1413 | @webcommand('help') | |
1409 | def help(web, req, tmpl): |
|
1414 | def help(web, req, tmpl): | |
1410 | """ |
|
1415 | """ | |
1411 | /help[/{topic}] |
|
1416 | /help[/{topic}] | |
1412 | --------------- |
|
1417 | --------------- | |
1413 |
|
1418 | |||
1414 | Render help documentation. |
|
1419 | Render help documentation. | |
1415 |
|
1420 | |||
1416 | This web command is roughly equivalent to :hg:`help`. If a ``topic`` |
|
1421 | This web command is roughly equivalent to :hg:`help`. If a ``topic`` | |
1417 | is defined, that help topic will be rendered. If not, an index of |
|
1422 | is defined, that help topic will be rendered. If not, an index of | |
1418 | available help topics will be rendered. |
|
1423 | available help topics will be rendered. | |
1419 |
|
1424 | |||
1420 | The ``help`` template will be rendered when requesting help for a topic. |
|
1425 | The ``help`` template will be rendered when requesting help for a topic. | |
1421 | ``helptopics`` will be rendered for the index of help topics. |
|
1426 | ``helptopics`` will be rendered for the index of help topics. | |
1422 | """ |
|
1427 | """ | |
1423 | from .. import commands, help as helpmod # avoid cycle |
|
1428 | from .. import commands, help as helpmod # avoid cycle | |
1424 |
|
1429 | |||
1425 | topicname = req.req.qsparams.get('node') |
|
1430 | topicname = req.req.qsparams.get('node') | |
1426 | if not topicname: |
|
1431 | if not topicname: | |
1427 | def topics(**map): |
|
1432 | def topics(**map): | |
1428 | for entries, summary, _doc in helpmod.helptable: |
|
1433 | for entries, summary, _doc in helpmod.helptable: | |
1429 | yield {'topic': entries[0], 'summary': summary} |
|
1434 | yield {'topic': entries[0], 'summary': summary} | |
1430 |
|
1435 | |||
1431 | early, other = [], [] |
|
1436 | early, other = [], [] | |
1432 | primary = lambda s: s.partition('|')[0] |
|
1437 | primary = lambda s: s.partition('|')[0] | |
1433 | for c, e in commands.table.iteritems(): |
|
1438 | for c, e in commands.table.iteritems(): | |
1434 | doc = _getdoc(e) |
|
1439 | doc = _getdoc(e) | |
1435 | if 'DEPRECATED' in doc or c.startswith('debug'): |
|
1440 | if 'DEPRECATED' in doc or c.startswith('debug'): | |
1436 | continue |
|
1441 | continue | |
1437 | cmd = primary(c) |
|
1442 | cmd = primary(c) | |
1438 | if cmd.startswith('^'): |
|
1443 | if cmd.startswith('^'): | |
1439 | early.append((cmd[1:], doc)) |
|
1444 | early.append((cmd[1:], doc)) | |
1440 | else: |
|
1445 | else: | |
1441 | other.append((cmd, doc)) |
|
1446 | other.append((cmd, doc)) | |
1442 |
|
1447 | |||
1443 | early.sort() |
|
1448 | early.sort() | |
1444 | other.sort() |
|
1449 | other.sort() | |
1445 |
|
1450 | |||
1446 | def earlycommands(**map): |
|
1451 | def earlycommands(**map): | |
1447 | for c, doc in early: |
|
1452 | for c, doc in early: | |
1448 | yield {'topic': c, 'summary': doc} |
|
1453 | yield {'topic': c, 'summary': doc} | |
1449 |
|
1454 | |||
1450 | def othercommands(**map): |
|
1455 | def othercommands(**map): | |
1451 | for c, doc in other: |
|
1456 | for c, doc in other: | |
1452 | yield {'topic': c, 'summary': doc} |
|
1457 | yield {'topic': c, 'summary': doc} | |
1453 |
|
1458 | |||
1454 | web.res.setbodygen(tmpl( |
|
1459 | web.res.setbodygen(tmpl( | |
1455 | 'helptopics', |
|
1460 | 'helptopics', | |
1456 | topics=topics, |
|
1461 | topics=topics, | |
1457 | earlycommands=earlycommands, |
|
1462 | earlycommands=earlycommands, | |
1458 | othercommands=othercommands, |
|
1463 | othercommands=othercommands, | |
1459 | title='Index')) |
|
1464 | title='Index')) | |
1460 | return web.res |
|
1465 | return web.res | |
1461 |
|
1466 | |||
1462 | # Render an index of sub-topics. |
|
1467 | # Render an index of sub-topics. | |
1463 | if topicname in helpmod.subtopics: |
|
1468 | if topicname in helpmod.subtopics: | |
1464 | topics = [] |
|
1469 | topics = [] | |
1465 | for entries, summary, _doc in helpmod.subtopics[topicname]: |
|
1470 | for entries, summary, _doc in helpmod.subtopics[topicname]: | |
1466 | topics.append({ |
|
1471 | topics.append({ | |
1467 | 'topic': '%s.%s' % (topicname, entries[0]), |
|
1472 | 'topic': '%s.%s' % (topicname, entries[0]), | |
1468 | 'basename': entries[0], |
|
1473 | 'basename': entries[0], | |
1469 | 'summary': summary, |
|
1474 | 'summary': summary, | |
1470 | }) |
|
1475 | }) | |
1471 |
|
1476 | |||
1472 | web.res.setbodygen(tmpl( |
|
1477 | web.res.setbodygen(tmpl( | |
1473 | 'helptopics', |
|
1478 | 'helptopics', | |
1474 | topics=topics, |
|
1479 | topics=topics, | |
1475 | title=topicname, |
|
1480 | title=topicname, | |
1476 | subindex=True)) |
|
1481 | subindex=True)) | |
1477 | return web.res |
|
1482 | return web.res | |
1478 |
|
1483 | |||
1479 | u = webutil.wsgiui.load() |
|
1484 | u = webutil.wsgiui.load() | |
1480 | u.verbose = True |
|
1485 | u.verbose = True | |
1481 |
|
1486 | |||
1482 | # Render a page from a sub-topic. |
|
1487 | # Render a page from a sub-topic. | |
1483 | if '.' in topicname: |
|
1488 | if '.' in topicname: | |
1484 | # TODO implement support for rendering sections, like |
|
1489 | # TODO implement support for rendering sections, like | |
1485 | # `hg help` works. |
|
1490 | # `hg help` works. | |
1486 | topic, subtopic = topicname.split('.', 1) |
|
1491 | topic, subtopic = topicname.split('.', 1) | |
1487 | if topic not in helpmod.subtopics: |
|
1492 | if topic not in helpmod.subtopics: | |
1488 | raise ErrorResponse(HTTP_NOT_FOUND) |
|
1493 | raise ErrorResponse(HTTP_NOT_FOUND) | |
1489 | else: |
|
1494 | else: | |
1490 | topic = topicname |
|
1495 | topic = topicname | |
1491 | subtopic = None |
|
1496 | subtopic = None | |
1492 |
|
1497 | |||
1493 | try: |
|
1498 | try: | |
1494 | doc = helpmod.help_(u, commands, topic, subtopic=subtopic) |
|
1499 | doc = helpmod.help_(u, commands, topic, subtopic=subtopic) | |
1495 | except error.Abort: |
|
1500 | except error.Abort: | |
1496 | raise ErrorResponse(HTTP_NOT_FOUND) |
|
1501 | raise ErrorResponse(HTTP_NOT_FOUND) | |
1497 |
|
1502 | |||
1498 | web.res.setbodygen(tmpl( |
|
1503 | web.res.setbodygen(tmpl( | |
1499 | 'help', |
|
1504 | 'help', | |
1500 | topic=topicname, |
|
1505 | topic=topicname, | |
1501 | doc=doc)) |
|
1506 | doc=doc)) | |
1502 |
|
1507 | |||
1503 | return web.res |
|
1508 | return web.res | |
1504 |
|
1509 | |||
1505 | # tell hggettext to extract docstrings from these functions: |
|
1510 | # tell hggettext to extract docstrings from these functions: | |
1506 | i18nfunctions = commands.values() |
|
1511 | i18nfunctions = commands.values() |
General Comments 0
You need to be logged in to leave comments.
Login now