@@ -1,530 +1,533 b'' | |||
|
1 | 1 | # hgweb/hgwebdir_mod.py - Web interface for a directory of repositories. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | import gc |
|
12 | 12 | import os |
|
13 | 13 | import time |
|
14 | 14 | |
|
15 | 15 | from ..i18n import _ |
|
16 | 16 | |
|
17 | 17 | from .common import ( |
|
18 | 18 | ErrorResponse, |
|
19 | 19 | HTTP_SERVER_ERROR, |
|
20 | 20 | cspvalues, |
|
21 | 21 | get_contact, |
|
22 | 22 | get_mtime, |
|
23 | 23 | ismember, |
|
24 | 24 | paritygen, |
|
25 | 25 | staticfile, |
|
26 | 26 | statusmessage, |
|
27 | 27 | ) |
|
28 | 28 | |
|
29 | 29 | from .. import ( |
|
30 | 30 | configitems, |
|
31 | 31 | encoding, |
|
32 | 32 | error, |
|
33 | 33 | hg, |
|
34 | 34 | profiling, |
|
35 | 35 | pycompat, |
|
36 | 36 | scmutil, |
|
37 | 37 | templater, |
|
38 | 38 | templateutil, |
|
39 | 39 | ui as uimod, |
|
40 | 40 | util, |
|
41 | 41 | ) |
|
42 | 42 | |
|
43 | 43 | from . import ( |
|
44 | 44 | hgweb_mod, |
|
45 | 45 | request as requestmod, |
|
46 | 46 | webutil, |
|
47 | 47 | wsgicgi, |
|
48 | 48 | ) |
|
49 | 49 | from ..utils import dateutil |
|
50 | 50 | |
|
51 | 51 | def cleannames(items): |
|
52 | 52 | return [(util.pconvert(name).strip('/'), path) for name, path in items] |
|
53 | 53 | |
|
54 | 54 | def findrepos(paths): |
|
55 | 55 | repos = [] |
|
56 | 56 | for prefix, root in cleannames(paths): |
|
57 | 57 | roothead, roottail = os.path.split(root) |
|
58 | 58 | # "foo = /bar/*" or "foo = /bar/**" lets every repo /bar/N in or below |
|
59 | 59 | # /bar/ be served as foo/N. |
|
60 | 60 | # '*' will not search inside dirs with .hg (except .hg/patches), |
|
61 | 61 | # '**' will search inside dirs with .hg (and thus also find subrepos). |
|
62 | 62 | try: |
|
63 | 63 | recurse = {'*': False, '**': True}[roottail] |
|
64 | 64 | except KeyError: |
|
65 | 65 | repos.append((prefix, root)) |
|
66 | 66 | continue |
|
67 | 67 | roothead = os.path.normpath(os.path.abspath(roothead)) |
|
68 | 68 | paths = scmutil.walkrepos(roothead, followsym=True, recurse=recurse) |
|
69 | 69 | repos.extend(urlrepos(prefix, roothead, paths)) |
|
70 | 70 | return repos |
|
71 | 71 | |
|
72 | 72 | def urlrepos(prefix, roothead, paths): |
|
73 | 73 | """yield url paths and filesystem paths from a list of repo paths |
|
74 | 74 | |
|
75 | 75 | >>> conv = lambda seq: [(v, util.pconvert(p)) for v,p in seq] |
|
76 | 76 | >>> conv(urlrepos(b'hg', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt'])) |
|
77 | 77 | [('hg/r', '/opt/r'), ('hg/r/r', '/opt/r/r'), ('hg', '/opt')] |
|
78 | 78 | >>> conv(urlrepos(b'', b'/opt', [b'/opt/r', b'/opt/r/r', b'/opt'])) |
|
79 | 79 | [('r', '/opt/r'), ('r/r', '/opt/r/r'), ('', '/opt')] |
|
80 | 80 | """ |
|
81 | 81 | for path in paths: |
|
82 | 82 | path = os.path.normpath(path) |
|
83 | 83 | yield (prefix + '/' + |
|
84 | 84 | util.pconvert(path[len(roothead):]).lstrip('/')).strip('/'), path |
|
85 | 85 | |
|
86 | 86 | def readallowed(ui, req): |
|
87 | 87 | """Check allow_read and deny_read config options of a repo's ui object |
|
88 | 88 | to determine user permissions. By default, with neither option set (or |
|
89 | 89 | both empty), allow all users to read the repo. There are two ways a |
|
90 | 90 | user can be denied read access: (1) deny_read is not empty, and the |
|
91 | 91 | user is unauthenticated or deny_read contains user (or *), and (2) |
|
92 | 92 | allow_read is not empty and the user is not in allow_read. Return True |
|
93 | 93 | if user is allowed to read the repo, else return False.""" |
|
94 | 94 | |
|
95 | 95 | user = req.remoteuser |
|
96 | 96 | |
|
97 | 97 | deny_read = ui.configlist('web', 'deny_read', untrusted=True) |
|
98 | 98 | if deny_read and (not user or ismember(ui, user, deny_read)): |
|
99 | 99 | return False |
|
100 | 100 | |
|
101 | 101 | allow_read = ui.configlist('web', 'allow_read', untrusted=True) |
|
102 | 102 | # by default, allow reading if no allow_read option has been set |
|
103 | 103 | if not allow_read or ismember(ui, user, allow_read): |
|
104 | 104 | return True |
|
105 | 105 | |
|
106 | 106 | return False |
|
107 | 107 | |
|
108 | 108 | def rawindexentries(ui, repos, req, subdir=''): |
|
109 | 109 | descend = ui.configbool('web', 'descend') |
|
110 | 110 | collapse = ui.configbool('web', 'collapse') |
|
111 | 111 | seenrepos = set() |
|
112 | 112 | seendirs = set() |
|
113 | 113 | for name, path in repos: |
|
114 | 114 | |
|
115 | 115 | if not name.startswith(subdir): |
|
116 | 116 | continue |
|
117 | 117 | name = name[len(subdir):] |
|
118 | 118 | directory = False |
|
119 | 119 | |
|
120 | 120 | if '/' in name: |
|
121 | 121 | if not descend: |
|
122 | 122 | continue |
|
123 | 123 | |
|
124 | 124 | nameparts = name.split('/') |
|
125 | 125 | rootname = nameparts[0] |
|
126 | 126 | |
|
127 | 127 | if not collapse: |
|
128 | 128 | pass |
|
129 | 129 | elif rootname in seendirs: |
|
130 | 130 | continue |
|
131 | 131 | elif rootname in seenrepos: |
|
132 | 132 | pass |
|
133 | 133 | else: |
|
134 | 134 | directory = True |
|
135 | 135 | name = rootname |
|
136 | 136 | |
|
137 | 137 | # redefine the path to refer to the directory |
|
138 | 138 | discarded = '/'.join(nameparts[1:]) |
|
139 | 139 | |
|
140 | 140 | # remove name parts plus accompanying slash |
|
141 | 141 | path = path[:-len(discarded) - 1] |
|
142 | 142 | |
|
143 | 143 | try: |
|
144 | 144 | r = hg.repository(ui, path) |
|
145 | 145 | directory = False |
|
146 | 146 | except (IOError, error.RepoError): |
|
147 | 147 | pass |
|
148 | 148 | |
|
149 | 149 | parts = [ |
|
150 | 150 | req.apppath.strip('/'), |
|
151 | 151 | subdir.strip('/'), |
|
152 | 152 | name.strip('/'), |
|
153 | 153 | ] |
|
154 | 154 | url = '/' + '/'.join(p for p in parts if p) + '/' |
|
155 | 155 | |
|
156 | 156 | # show either a directory entry or a repository |
|
157 | 157 | if directory: |
|
158 | 158 | # get the directory's time information |
|
159 | 159 | try: |
|
160 | 160 | d = (get_mtime(path), dateutil.makedate()[1]) |
|
161 | 161 | except OSError: |
|
162 | 162 | continue |
|
163 | 163 | |
|
164 | 164 | # add '/' to the name to make it obvious that |
|
165 | 165 | # the entry is a directory, not a regular repository |
|
166 | 166 | row = {'contact': "", |
|
167 | 167 | 'contact_sort': "", |
|
168 | 168 | 'name': name + '/', |
|
169 | 169 | 'name_sort': name, |
|
170 | 170 | 'url': url, |
|
171 | 171 | 'description': "", |
|
172 | 172 | 'description_sort': "", |
|
173 | 173 | 'lastchange': d, |
|
174 | 174 | 'lastchange_sort': d[1] - d[0], |
|
175 | 175 | 'archives': templateutil.mappinglist([]), |
|
176 | 176 | 'isdirectory': True, |
|
177 | 177 | 'labels': templateutil.hybridlist([], name='label'), |
|
178 | 178 | } |
|
179 | 179 | |
|
180 | 180 | seendirs.add(name) |
|
181 | 181 | yield row |
|
182 | 182 | continue |
|
183 | 183 | |
|
184 | 184 | u = ui.copy() |
|
185 | 185 | try: |
|
186 | 186 | u.readconfig(os.path.join(path, '.hg', 'hgrc')) |
|
187 | 187 | except Exception as e: |
|
188 | 188 | u.warn(_('error reading %s/.hg/hgrc: %s\n') % (path, e)) |
|
189 | 189 | continue |
|
190 | 190 | |
|
191 | 191 | def get(section, name, default=uimod._unset): |
|
192 | 192 | return u.config(section, name, default, untrusted=True) |
|
193 | 193 | |
|
194 | 194 | if u.configbool("web", "hidden", untrusted=True): |
|
195 | 195 | continue |
|
196 | 196 | |
|
197 | 197 | if not readallowed(u, req): |
|
198 | 198 | continue |
|
199 | 199 | |
|
200 | 200 | # update time with local timezone |
|
201 | 201 | try: |
|
202 | 202 | r = hg.repository(ui, path) |
|
203 | 203 | except IOError: |
|
204 | 204 | u.warn(_('error accessing repository at %s\n') % path) |
|
205 | 205 | continue |
|
206 | 206 | except error.RepoError: |
|
207 | 207 | u.warn(_('error accessing repository at %s\n') % path) |
|
208 | 208 | continue |
|
209 | 209 | try: |
|
210 | 210 | d = (get_mtime(r.spath), dateutil.makedate()[1]) |
|
211 | 211 | except OSError: |
|
212 | 212 | continue |
|
213 | 213 | |
|
214 | 214 | contact = get_contact(get) |
|
215 | 215 | description = get("web", "description") |
|
216 | 216 | seenrepos.add(name) |
|
217 | 217 | name = get("web", "name", name) |
|
218 | 218 | labels = u.configlist('web', 'labels', untrusted=True) |
|
219 | 219 | row = {'contact': contact or "unknown", |
|
220 | 220 | 'contact_sort': contact.upper() or "unknown", |
|
221 | 221 | 'name': name, |
|
222 | 222 | 'name_sort': name, |
|
223 | 223 | 'url': url, |
|
224 | 224 | 'description': description or "unknown", |
|
225 | 225 | 'description_sort': description.upper() or "unknown", |
|
226 | 226 | 'lastchange': d, |
|
227 | 227 | 'lastchange_sort': d[1] - d[0], |
|
228 | 228 | 'archives': webutil.archivelist(u, "tip", url), |
|
229 | 229 | 'isdirectory': None, |
|
230 | 230 | 'labels': templateutil.hybridlist(labels, name='label'), |
|
231 | 231 | } |
|
232 | 232 | |
|
233 | 233 | yield row |
|
234 | 234 | |
|
235 | 235 | def _indexentriesgen(context, ui, repos, req, stripecount, sortcolumn, |
|
236 | 236 | descending, subdir): |
|
237 | 237 | rows = rawindexentries(ui, repos, req, subdir=subdir) |
|
238 | 238 | |
|
239 | 239 | sortdefault = None, False |
|
240 | 240 | |
|
241 | 241 | if sortcolumn and sortdefault != (sortcolumn, descending): |
|
242 | 242 | sortkey = '%s_sort' % sortcolumn |
|
243 | 243 | rows = sorted(rows, key=lambda x: x[sortkey], |
|
244 | 244 | reverse=descending) |
|
245 | 245 | |
|
246 | 246 | for row, parity in zip(rows, paritygen(stripecount)): |
|
247 | 247 | row['parity'] = parity |
|
248 | 248 | yield row |
|
249 | 249 | |
|
250 | 250 | def indexentries(ui, repos, req, stripecount, sortcolumn='', |
|
251 | 251 | descending=False, subdir=''): |
|
252 | 252 | args = (ui, repos, req, stripecount, sortcolumn, descending, subdir) |
|
253 | 253 | return templateutil.mappinggenerator(_indexentriesgen, args=args) |
|
254 | 254 | |
|
255 | 255 | class hgwebdir(object): |
|
256 | 256 | """HTTP server for multiple repositories. |
|
257 | 257 | |
|
258 | 258 | Given a configuration, different repositories will be served depending |
|
259 | 259 | on the request path. |
|
260 | 260 | |
|
261 | 261 | Instances are typically used as WSGI applications. |
|
262 | 262 | """ |
|
263 | 263 | def __init__(self, conf, baseui=None): |
|
264 | 264 | self.conf = conf |
|
265 | 265 | self.baseui = baseui |
|
266 | 266 | self.ui = None |
|
267 | 267 | self.lastrefresh = 0 |
|
268 | 268 | self.motd = None |
|
269 | 269 | self.refresh() |
|
270 | 270 | |
|
271 | 271 | def refresh(self): |
|
272 | 272 | if self.ui: |
|
273 | 273 | refreshinterval = self.ui.configint('web', 'refreshinterval') |
|
274 | 274 | else: |
|
275 | 275 | item = configitems.coreitems['web']['refreshinterval'] |
|
276 | 276 | refreshinterval = item.default |
|
277 | 277 | |
|
278 | 278 | # refreshinterval <= 0 means to always refresh. |
|
279 | 279 | if (refreshinterval > 0 and |
|
280 | 280 | self.lastrefresh + refreshinterval > time.time()): |
|
281 | 281 | return |
|
282 | 282 | |
|
283 | 283 | if self.baseui: |
|
284 | 284 | u = self.baseui.copy() |
|
285 | 285 | else: |
|
286 | 286 | u = uimod.ui.load() |
|
287 | 287 | u.setconfig('ui', 'report_untrusted', 'off', 'hgwebdir') |
|
288 | 288 | u.setconfig('ui', 'nontty', 'true', 'hgwebdir') |
|
289 | 289 | # displaying bundling progress bar while serving feels wrong and may |
|
290 | 290 | # break some wsgi implementations. |
|
291 | 291 | u.setconfig('progress', 'disable', 'true', 'hgweb') |
|
292 | 292 | |
|
293 | 293 | if not isinstance(self.conf, (dict, list, tuple)): |
|
294 | 294 | map = {'paths': 'hgweb-paths'} |
|
295 | 295 | if not os.path.exists(self.conf): |
|
296 | 296 | raise error.Abort(_('config file %s not found!') % self.conf) |
|
297 | 297 | u.readconfig(self.conf, remap=map, trust=True) |
|
298 | 298 | paths = [] |
|
299 | 299 | for name, ignored in u.configitems('hgweb-paths'): |
|
300 | 300 | for path in u.configlist('hgweb-paths', name): |
|
301 | 301 | paths.append((name, path)) |
|
302 | 302 | elif isinstance(self.conf, (list, tuple)): |
|
303 | 303 | paths = self.conf |
|
304 | 304 | elif isinstance(self.conf, dict): |
|
305 | 305 | paths = self.conf.items() |
|
306 | 306 | |
|
307 | 307 | repos = findrepos(paths) |
|
308 | 308 | for prefix, root in u.configitems('collections'): |
|
309 | 309 | prefix = util.pconvert(prefix) |
|
310 | 310 | for path in scmutil.walkrepos(root, followsym=True): |
|
311 | 311 | repo = os.path.normpath(path) |
|
312 | 312 | name = util.pconvert(repo) |
|
313 | 313 | if name.startswith(prefix): |
|
314 | 314 | name = name[len(prefix):] |
|
315 | 315 | repos.append((name.lstrip('/'), repo)) |
|
316 | 316 | |
|
317 | 317 | self.repos = repos |
|
318 | 318 | self.ui = u |
|
319 | 319 | encoding.encoding = self.ui.config('web', 'encoding') |
|
320 | 320 | self.style = self.ui.config('web', 'style') |
|
321 | 321 | self.templatepath = self.ui.config('web', 'templates', untrusted=False) |
|
322 | 322 | self.stripecount = self.ui.config('web', 'stripes') |
|
323 | 323 | if self.stripecount: |
|
324 | 324 | self.stripecount = int(self.stripecount) |
|
325 | 325 | prefix = self.ui.config('web', 'prefix') |
|
326 | 326 | if prefix.startswith('/'): |
|
327 | 327 | prefix = prefix[1:] |
|
328 | 328 | if prefix.endswith('/'): |
|
329 | 329 | prefix = prefix[:-1] |
|
330 | 330 | self.prefix = prefix |
|
331 | 331 | self.lastrefresh = time.time() |
|
332 | 332 | |
|
333 | 333 | def run(self): |
|
334 | 334 | if not encoding.environ.get('GATEWAY_INTERFACE', |
|
335 | 335 | '').startswith("CGI/1."): |
|
336 | 336 | raise RuntimeError("This function is only intended to be " |
|
337 | 337 | "called while running as a CGI script.") |
|
338 | 338 | wsgicgi.launch(self) |
|
339 | 339 | |
|
340 | 340 | def __call__(self, env, respond): |
|
341 | 341 | baseurl = self.ui.config('web', 'baseurl') |
|
342 | 342 | req = requestmod.parserequestfromenv(env, altbaseurl=baseurl) |
|
343 | 343 | res = requestmod.wsgiresponse(req, respond) |
|
344 | 344 | |
|
345 | 345 | return self.run_wsgi(req, res) |
|
346 | 346 | |
|
347 | 347 | def run_wsgi(self, req, res): |
|
348 | 348 | profile = self.ui.configbool('profiling', 'enabled') |
|
349 | 349 | with profiling.profile(self.ui, enabled=profile): |
|
350 | 350 | try: |
|
351 | 351 | for r in self._runwsgi(req, res): |
|
352 | 352 | yield r |
|
353 | 353 | finally: |
|
354 | 354 | # There are known cycles in localrepository that prevent |
|
355 | 355 | # those objects (and tons of held references) from being |
|
356 | 356 | # collected through normal refcounting. We mitigate those |
|
357 | 357 | # leaks by performing an explicit GC on every request. |
|
358 | 358 | # TODO remove this once leaks are fixed. |
|
359 | 359 | # TODO only run this on requests that create localrepository |
|
360 | 360 | # instances instead of every request. |
|
361 | 361 | gc.collect() |
|
362 | 362 | |
|
363 | 363 | def _runwsgi(self, req, res): |
|
364 | 364 | try: |
|
365 | 365 | self.refresh() |
|
366 | 366 | |
|
367 | 367 | csp, nonce = cspvalues(self.ui) |
|
368 | 368 | if csp: |
|
369 | 369 | res.headers['Content-Security-Policy'] = csp |
|
370 | 370 | |
|
371 | 371 | virtual = req.dispatchpath.strip('/') |
|
372 | 372 | tmpl = self.templater(req, nonce) |
|
373 | 373 | ctype = tmpl.render('mimetype', {'encoding': encoding.encoding}) |
|
374 | 374 | |
|
375 | 375 | # Global defaults. These can be overridden by any handler. |
|
376 | 376 | res.status = '200 Script output follows' |
|
377 | 377 | res.headers['Content-Type'] = ctype |
|
378 | 378 | |
|
379 | 379 | # a static file |
|
380 | 380 | if virtual.startswith('static/') or 'static' in req.qsparams: |
|
381 | 381 | if virtual.startswith('static/'): |
|
382 | 382 | fname = virtual[7:] |
|
383 | 383 | else: |
|
384 | 384 | fname = req.qsparams['static'] |
|
385 | 385 | static = self.ui.config("web", "static", None, |
|
386 | 386 | untrusted=False) |
|
387 | 387 | if not static: |
|
388 | 388 | tp = self.templatepath or templater.templatepaths() |
|
389 | 389 | if isinstance(tp, str): |
|
390 | 390 | tp = [tp] |
|
391 | 391 | static = [os.path.join(p, 'static') for p in tp] |
|
392 | 392 | |
|
393 | 393 | staticfile(static, fname, res) |
|
394 | 394 | return res.sendresponse() |
|
395 | 395 | |
|
396 | 396 | # top-level index |
|
397 | 397 | |
|
398 | 398 | repos = dict(self.repos) |
|
399 | 399 | |
|
400 | 400 | if (not virtual or virtual == 'index') and virtual not in repos: |
|
401 | 401 | return self.makeindex(req, res, tmpl) |
|
402 | 402 | |
|
403 | 403 | # nested indexes and hgwebs |
|
404 | 404 | |
|
405 | 405 | if virtual.endswith('/index') and virtual not in repos: |
|
406 | 406 | subdir = virtual[:-len('index')] |
|
407 | 407 | if any(r.startswith(subdir) for r in repos): |
|
408 | 408 | return self.makeindex(req, res, tmpl, subdir) |
|
409 | 409 | |
|
410 | 410 | def _virtualdirs(): |
|
411 | 411 | # Check the full virtual path, each parent, and the root ('') |
|
412 | 412 | if virtual != '': |
|
413 | 413 | yield virtual |
|
414 | 414 | |
|
415 | 415 | for p in util.finddirs(virtual): |
|
416 | 416 | yield p |
|
417 | 417 | |
|
418 | 418 | yield '' |
|
419 | 419 | |
|
420 | 420 | for virtualrepo in _virtualdirs(): |
|
421 | 421 | real = repos.get(virtualrepo) |
|
422 | 422 | if real: |
|
423 | 423 | # Re-parse the WSGI environment to take into account our |
|
424 | 424 | # repository path component. |
|
425 | 425 | uenv = req.rawenv |
|
426 | 426 | if pycompat.ispy3: |
|
427 | 427 | uenv = {k.decode('latin1'): v for k, v in |
|
428 | 428 | uenv.iteritems()} |
|
429 | 429 | req = requestmod.parserequestfromenv( |
|
430 | 430 | uenv, reponame=virtualrepo, |
|
431 | altbaseurl=self.ui.config('web', 'baseurl')) | |
|
431 | altbaseurl=self.ui.config('web', 'baseurl'), | |
|
432 | # Reuse wrapped body file object otherwise state | |
|
433 | # tracking can get confused. | |
|
434 | bodyfh=req.bodyfh) | |
|
432 | 435 | try: |
|
433 | 436 | # ensure caller gets private copy of ui |
|
434 | 437 | repo = hg.repository(self.ui.copy(), real) |
|
435 | 438 | return hgweb_mod.hgweb(repo).run_wsgi(req, res) |
|
436 | 439 | except IOError as inst: |
|
437 | 440 | msg = encoding.strtolocal(inst.strerror) |
|
438 | 441 | raise ErrorResponse(HTTP_SERVER_ERROR, msg) |
|
439 | 442 | except error.RepoError as inst: |
|
440 | 443 | raise ErrorResponse(HTTP_SERVER_ERROR, bytes(inst)) |
|
441 | 444 | |
|
442 | 445 | # browse subdirectories |
|
443 | 446 | subdir = virtual + '/' |
|
444 | 447 | if [r for r in repos if r.startswith(subdir)]: |
|
445 | 448 | return self.makeindex(req, res, tmpl, subdir) |
|
446 | 449 | |
|
447 | 450 | # prefixes not found |
|
448 | 451 | res.status = '404 Not Found' |
|
449 | 452 | res.setbodygen(tmpl.generate('notfound', {'repo': virtual})) |
|
450 | 453 | return res.sendresponse() |
|
451 | 454 | |
|
452 | 455 | except ErrorResponse as e: |
|
453 | 456 | res.status = statusmessage(e.code, pycompat.bytestr(e)) |
|
454 | 457 | res.setbodygen(tmpl.generate('error', {'error': e.message or ''})) |
|
455 | 458 | return res.sendresponse() |
|
456 | 459 | finally: |
|
457 | 460 | tmpl = None |
|
458 | 461 | |
|
459 | 462 | def makeindex(self, req, res, tmpl, subdir=""): |
|
460 | 463 | self.refresh() |
|
461 | 464 | sortable = ["name", "description", "contact", "lastchange"] |
|
462 | 465 | sortcolumn, descending = None, False |
|
463 | 466 | if 'sort' in req.qsparams: |
|
464 | 467 | sortcolumn = req.qsparams['sort'] |
|
465 | 468 | descending = sortcolumn.startswith('-') |
|
466 | 469 | if descending: |
|
467 | 470 | sortcolumn = sortcolumn[1:] |
|
468 | 471 | if sortcolumn not in sortable: |
|
469 | 472 | sortcolumn = "" |
|
470 | 473 | |
|
471 | 474 | sort = [("sort_%s" % column, |
|
472 | 475 | "%s%s" % ((not descending and column == sortcolumn) |
|
473 | 476 | and "-" or "", column)) |
|
474 | 477 | for column in sortable] |
|
475 | 478 | |
|
476 | 479 | self.refresh() |
|
477 | 480 | |
|
478 | 481 | entries = indexentries(self.ui, self.repos, req, |
|
479 | 482 | self.stripecount, sortcolumn=sortcolumn, |
|
480 | 483 | descending=descending, subdir=subdir) |
|
481 | 484 | |
|
482 | 485 | mapping = { |
|
483 | 486 | 'entries': entries, |
|
484 | 487 | 'subdir': subdir, |
|
485 | 488 | 'pathdef': hgweb_mod.makebreadcrumb('/' + subdir, self.prefix), |
|
486 | 489 | 'sortcolumn': sortcolumn, |
|
487 | 490 | 'descending': descending, |
|
488 | 491 | } |
|
489 | 492 | mapping.update(sort) |
|
490 | 493 | res.setbodygen(tmpl.generate('index', mapping)) |
|
491 | 494 | return res.sendresponse() |
|
492 | 495 | |
|
493 | 496 | def templater(self, req, nonce): |
|
494 | 497 | |
|
495 | 498 | def motd(**map): |
|
496 | 499 | if self.motd is not None: |
|
497 | 500 | yield self.motd |
|
498 | 501 | else: |
|
499 | 502 | yield config('web', 'motd') |
|
500 | 503 | |
|
501 | 504 | def config(section, name, default=uimod._unset, untrusted=True): |
|
502 | 505 | return self.ui.config(section, name, default, untrusted) |
|
503 | 506 | |
|
504 | 507 | vars = {} |
|
505 | 508 | styles, (style, mapfile) = hgweb_mod.getstyle(req, config, |
|
506 | 509 | self.templatepath) |
|
507 | 510 | if style == styles[0]: |
|
508 | 511 | vars['style'] = style |
|
509 | 512 | |
|
510 | 513 | sessionvars = webutil.sessionvars(vars, r'?') |
|
511 | 514 | logourl = config('web', 'logourl') |
|
512 | 515 | logoimg = config('web', 'logoimg') |
|
513 | 516 | staticurl = (config('web', 'staticurl') |
|
514 | 517 | or req.apppath + '/static/') |
|
515 | 518 | if not staticurl.endswith('/'): |
|
516 | 519 | staticurl += '/' |
|
517 | 520 | |
|
518 | 521 | defaults = { |
|
519 | 522 | "encoding": encoding.encoding, |
|
520 | 523 | "motd": motd, |
|
521 | 524 | "url": req.apppath + '/', |
|
522 | 525 | "logourl": logourl, |
|
523 | 526 | "logoimg": logoimg, |
|
524 | 527 | "staticurl": staticurl, |
|
525 | 528 | "sessionvars": sessionvars, |
|
526 | 529 | "style": style, |
|
527 | 530 | "nonce": nonce, |
|
528 | 531 | } |
|
529 | 532 | tmpl = templater.templater.frommapfile(mapfile, defaults=defaults) |
|
530 | 533 | return tmpl |
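
For readability, the nested-repo dispatch call changed in the hunk above reads as follows when assembled as ordinary code. This is a sketch using only names that appear in this diff (req, virtualrepo, requestmod, pycompat, self.ui), not a drop-in replacement for the patch: the second parserequestfromenv() call now threads the already-wrapped body file object through instead of letting the parser re-wrap wsgi.input.

    # Sketch of the dispatch code after this change; `req` and `virtualrepo`
    # are the locals used in hgwebdir._runwsgi() above.
    uenv = req.rawenv
    if pycompat.ispy3:
        # rawenv keys are native str on Python 3; hgweb works with bytes.
        uenv = {k.decode('latin1'): v for k, v in uenv.iteritems()}
    req = requestmod.parserequestfromenv(
        uenv, reponame=virtualrepo,
        altbaseurl=self.ui.config('web', 'baseurl'),
        # Reuse wrapped body file object otherwise state
        # tracking can get confused.
        bodyfh=req.bodyfh)
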
@@ -1,570 +1,574 b'' | |||
|
1 | 1 | # hgweb/request.py - An http request from either CGI or the standalone server. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | from __future__ import absolute_import |
|
10 | 10 | |
|
11 | 11 | #import wsgiref.validate |
|
12 | 12 | |
|
13 | 13 | from ..thirdparty import ( |
|
14 | 14 | attr, |
|
15 | 15 | ) |
|
16 | 16 | from .. import ( |
|
17 | 17 | error, |
|
18 | 18 | pycompat, |
|
19 | 19 | util, |
|
20 | 20 | ) |
|
21 | 21 | |
|
22 | 22 | class multidict(object): |
|
23 | 23 | """A dict like object that can store multiple values for a key. |
|
24 | 24 | |
|
25 | 25 | Used to store parsed request parameters. |
|
26 | 26 | |
|
27 | 27 | This is inspired by WebOb's class of the same name. |
|
28 | 28 | """ |
|
29 | 29 | def __init__(self): |
|
30 | 30 | self._items = {} |
|
31 | 31 | |
|
32 | 32 | def __getitem__(self, key): |
|
33 | 33 | """Returns the last set value for a key.""" |
|
34 | 34 | return self._items[key][-1] |
|
35 | 35 | |
|
36 | 36 | def __setitem__(self, key, value): |
|
37 | 37 | """Replace the values for a key with a new value.""" |
|
38 | 38 | self._items[key] = [value] |
|
39 | 39 | |
|
40 | 40 | def __delitem__(self, key): |
|
41 | 41 | """Delete all values for a key.""" |
|
42 | 42 | del self._items[key] |
|
43 | 43 | |
|
44 | 44 | def __contains__(self, key): |
|
45 | 45 | return key in self._items |
|
46 | 46 | |
|
47 | 47 | def __len__(self): |
|
48 | 48 | return len(self._items) |
|
49 | 49 | |
|
50 | 50 | def get(self, key, default=None): |
|
51 | 51 | try: |
|
52 | 52 | return self.__getitem__(key) |
|
53 | 53 | except KeyError: |
|
54 | 54 | return default |
|
55 | 55 | |
|
56 | 56 | def add(self, key, value): |
|
57 | 57 | """Add a new value for a key. Does not replace existing values.""" |
|
58 | 58 | self._items.setdefault(key, []).append(value) |
|
59 | 59 | |
|
60 | 60 | def getall(self, key): |
|
61 | 61 | """Obtains all values for a key.""" |
|
62 | 62 | return self._items.get(key, []) |
|
63 | 63 | |
|
64 | 64 | def getone(self, key): |
|
65 | 65 | """Obtain a single value for a key. |
|
66 | 66 | |
|
67 | 67 | Raises KeyError if key not defined or it has multiple values set. |
|
68 | 68 | """ |
|
69 | 69 | vals = self._items[key] |
|
70 | 70 | |
|
71 | 71 | if len(vals) > 1: |
|
72 | 72 | raise KeyError('multiple values for %r' % key) |
|
73 | 73 | |
|
74 | 74 | return vals[0] |
|
75 | 75 | |
|
76 | 76 | def asdictoflists(self): |
|
77 | 77 | return {k: list(v) for k, v in self._items.iteritems()} |
|
78 | 78 | |
|
79 | 79 | @attr.s(frozen=True) |
|
80 | 80 | class parsedrequest(object): |
|
81 | 81 | """Represents a parsed WSGI request. |
|
82 | 82 | |
|
83 | 83 | Contains both parsed parameters as well as a handle on the input stream. |
|
84 | 84 | """ |
|
85 | 85 | |
|
86 | 86 | # Request method. |
|
87 | 87 | method = attr.ib() |
|
88 | 88 | # Full URL for this request. |
|
89 | 89 | url = attr.ib() |
|
90 | 90 | # URL without any path components. Just <proto>://<host><port>. |
|
91 | 91 | baseurl = attr.ib() |
|
92 | 92 | # Advertised URL. Like ``url`` and ``baseurl`` but uses SERVER_NAME instead |
|
93 | 93 | # of HTTP: Host header for hostname. This is likely what clients used. |
|
94 | 94 | advertisedurl = attr.ib() |
|
95 | 95 | advertisedbaseurl = attr.ib() |
|
96 | 96 | # URL scheme (part before ``://``). e.g. ``http`` or ``https``. |
|
97 | 97 | urlscheme = attr.ib() |
|
98 | 98 | # Value of REMOTE_USER, if set, or None. |
|
99 | 99 | remoteuser = attr.ib() |
|
100 | 100 | # Value of REMOTE_HOST, if set, or None. |
|
101 | 101 | remotehost = attr.ib() |
|
102 | 102 | # Relative WSGI application path. If defined, will begin with a |
|
103 | 103 | # ``/``. |
|
104 | 104 | apppath = attr.ib() |
|
105 | 105 | # List of path parts to be used for dispatch. |
|
106 | 106 | dispatchparts = attr.ib() |
|
107 | 107 | # URL path component (no query string) used for dispatch. Can be |
|
108 | 108 | # ``None`` to signal no path component given to the request, an |
|
109 | 109 | # empty string to signal a request to the application's root URL, |
|
110 | 110 | # or a string not beginning with ``/`` containing the requested |
|
111 | 111 | # path under the application. |
|
112 | 112 | dispatchpath = attr.ib() |
|
113 | 113 | # The name of the repository being accessed. |
|
114 | 114 | reponame = attr.ib() |
|
115 | 115 | # Raw query string (part after "?" in URL). |
|
116 | 116 | querystring = attr.ib() |
|
117 | 117 | # multidict of query string parameters. |
|
118 | 118 | qsparams = attr.ib() |
|
119 | 119 | # wsgiref.headers.Headers instance. Operates like a dict with case |
|
120 | 120 | # insensitive keys. |
|
121 | 121 | headers = attr.ib() |
|
122 | 122 | # Request body input stream. |
|
123 | 123 | bodyfh = attr.ib() |
|
124 | 124 | # WSGI environment dict, unmodified. |
|
125 | 125 | rawenv = attr.ib() |
|
126 | 126 | |
|
127 | def parserequestfromenv(env, reponame=None, altbaseurl=None): | |
|
127 | def parserequestfromenv(env, reponame=None, altbaseurl=None, bodyfh=None): | |
|
128 | 128 | """Parse URL components from environment variables. |
|
129 | 129 | |
|
130 | 130 | WSGI defines request attributes via environment variables. This function |
|
131 | 131 | parses the environment variables into a data structure. |
|
132 | 132 | |
|
133 | 133 | If ``reponame`` is defined, the leading path components matching that |
|
134 | 134 | string are effectively shifted from ``PATH_INFO`` to ``SCRIPT_NAME``. |
|
135 | 135 | This simulates the world view of a WSGI application that processes |
|
136 | 136 | requests from the base URL of a repo. |
|
137 | 137 | |
|
138 | 138 | If ``altbaseurl`` (typically comes from ``web.baseurl`` config option) |
|
139 | 139 | is defined, it is used - instead of the WSGI environment variables - for |
|
140 | 140 | constructing URL components up to and including the WSGI application path. |
|
141 | 141 | For example, if the current WSGI application is at ``/repo`` and a request |
|
142 | 142 | is made to ``/rev/@`` with this argument set to |
|
143 | 143 | ``http://myserver:9000/prefix``, the URL and path components will resolve as |
|
144 | 144 | if the request were to ``http://myserver:9000/prefix/rev/@``. In other |
|
145 | 145 | words, ``wsgi.url_scheme``, ``SERVER_NAME``, ``SERVER_PORT``, and |
|
146 | 146 | ``SCRIPT_NAME`` are all effectively replaced by components from this URL. |
|
147 | ||
|
148 | ``bodyfh`` can be used to specify a file object to read the request body | |
|
149 | from. If not defined, ``wsgi.input`` from the environment dict is used. | |
|
147 | 150 | """ |
|
148 | 151 | # PEP 3333 defines the WSGI spec and is a useful reference for this code. |
|
149 | 152 | |
|
150 | 153 | # We first validate that the incoming object conforms with the WSGI spec. |
|
151 | 154 | # We only want to be dealing with spec-conforming WSGI implementations. |
|
152 | 155 | # TODO enable this once we fix internal violations. |
|
153 | 156 | #wsgiref.validate.check_environ(env) |
|
154 | 157 | |
|
155 | 158 | # PEP-0333 states that environment keys and values are native strings |
|
156 | 159 | # (bytes on Python 2 and str on Python 3). The code points for the Unicode |
|
157 | 160 | # strings on Python 3 must be between \00000-\000FF. We deal with bytes |
|
158 | 161 | # in Mercurial, so mass convert string keys and values to bytes. |
|
159 | 162 | if pycompat.ispy3: |
|
160 | 163 | env = {k.encode('latin-1'): v for k, v in env.iteritems()} |
|
161 | 164 | env = {k: v.encode('latin-1') if isinstance(v, str) else v |
|
162 | 165 | for k, v in env.iteritems()} |
|
163 | 166 | |
|
164 | 167 | # Some hosting solutions are emulating hgwebdir, and dispatching directly |
|
165 | 168 | # to an hgweb instance using this environment variable. This was always |
|
166 | 169 | # checked prior to d7fd203e36cc; keep doing so to avoid breaking them. |
|
167 | 170 | if not reponame: |
|
168 | 171 | reponame = env.get('REPO_NAME') |
|
169 | 172 | |
|
170 | 173 | if altbaseurl: |
|
171 | 174 | altbaseurl = util.url(altbaseurl) |
|
172 | 175 | |
|
173 | 176 | # https://www.python.org/dev/peps/pep-0333/#environ-variables defines |
|
174 | 177 | # the environment variables. |
|
175 | 178 | # https://www.python.org/dev/peps/pep-0333/#url-reconstruction defines |
|
176 | 179 | # how URLs are reconstructed. |
|
177 | 180 | fullurl = env['wsgi.url_scheme'] + '://' |
|
178 | 181 | |
|
179 | 182 | if altbaseurl and altbaseurl.scheme: |
|
180 | 183 | advertisedfullurl = altbaseurl.scheme + '://' |
|
181 | 184 | else: |
|
182 | 185 | advertisedfullurl = fullurl |
|
183 | 186 | |
|
184 | 187 | def addport(s, port): |
|
185 | 188 | if s.startswith('https://'): |
|
186 | 189 | if port != '443': |
|
187 | 190 | s += ':' + port |
|
188 | 191 | else: |
|
189 | 192 | if port != '80': |
|
190 | 193 | s += ':' + port |
|
191 | 194 | |
|
192 | 195 | return s |
|
193 | 196 | |
|
194 | 197 | if env.get('HTTP_HOST'): |
|
195 | 198 | fullurl += env['HTTP_HOST'] |
|
196 | 199 | else: |
|
197 | 200 | fullurl += env['SERVER_NAME'] |
|
198 | 201 | fullurl = addport(fullurl, env['SERVER_PORT']) |
|
199 | 202 | |
|
200 | 203 | if altbaseurl and altbaseurl.host: |
|
201 | 204 | advertisedfullurl += altbaseurl.host |
|
202 | 205 | |
|
203 | 206 | if altbaseurl.port: |
|
204 | 207 | port = altbaseurl.port |
|
205 | 208 | elif altbaseurl.scheme == 'http' and not altbaseurl.port: |
|
206 | 209 | port = '80' |
|
207 | 210 | elif altbaseurl.scheme == 'https' and not altbaseurl.port: |
|
208 | 211 | port = '443' |
|
209 | 212 | else: |
|
210 | 213 | port = env['SERVER_PORT'] |
|
211 | 214 | |
|
212 | 215 | advertisedfullurl = addport(advertisedfullurl, port) |
|
213 | 216 | else: |
|
214 | 217 | advertisedfullurl += env['SERVER_NAME'] |
|
215 | 218 | advertisedfullurl = addport(advertisedfullurl, env['SERVER_PORT']) |
|
216 | 219 | |
|
217 | 220 | baseurl = fullurl |
|
218 | 221 | advertisedbaseurl = advertisedfullurl |
|
219 | 222 | |
|
220 | 223 | fullurl += util.urlreq.quote(env.get('SCRIPT_NAME', '')) |
|
221 | 224 | fullurl += util.urlreq.quote(env.get('PATH_INFO', '')) |
|
222 | 225 | |
|
223 | 226 | if altbaseurl: |
|
224 | 227 | path = altbaseurl.path or '' |
|
225 | 228 | if path and not path.startswith('/'): |
|
226 | 229 | path = '/' + path |
|
227 | 230 | advertisedfullurl += util.urlreq.quote(path) |
|
228 | 231 | else: |
|
229 | 232 | advertisedfullurl += util.urlreq.quote(env.get('SCRIPT_NAME', '')) |
|
230 | 233 | |
|
231 | 234 | advertisedfullurl += util.urlreq.quote(env.get('PATH_INFO', '')) |
|
232 | 235 | |
|
233 | 236 | if env.get('QUERY_STRING'): |
|
234 | 237 | fullurl += '?' + env['QUERY_STRING'] |
|
235 | 238 | advertisedfullurl += '?' + env['QUERY_STRING'] |
|
236 | 239 | |
|
237 | 240 | # If ``reponame`` is defined, that must be a prefix on PATH_INFO |
|
238 | 241 | # that represents the repository being dispatched to. When computing |
|
239 | 242 | # the dispatch info, we ignore these leading path components. |
|
240 | 243 | |
|
241 | 244 | if altbaseurl: |
|
242 | 245 | apppath = altbaseurl.path or '' |
|
243 | 246 | if apppath and not apppath.startswith('/'): |
|
244 | 247 | apppath = '/' + apppath |
|
245 | 248 | else: |
|
246 | 249 | apppath = env.get('SCRIPT_NAME', '') |
|
247 | 250 | |
|
248 | 251 | if reponame: |
|
249 | 252 | repoprefix = '/' + reponame.strip('/') |
|
250 | 253 | |
|
251 | 254 | if not env.get('PATH_INFO'): |
|
252 | 255 | raise error.ProgrammingError('reponame requires PATH_INFO') |
|
253 | 256 | |
|
254 | 257 | if not env['PATH_INFO'].startswith(repoprefix): |
|
255 | 258 | raise error.ProgrammingError('PATH_INFO does not begin with repo ' |
|
256 | 259 | 'name: %s (%s)' % (env['PATH_INFO'], |
|
257 | 260 | reponame)) |
|
258 | 261 | |
|
259 | 262 | dispatchpath = env['PATH_INFO'][len(repoprefix):] |
|
260 | 263 | |
|
261 | 264 | if dispatchpath and not dispatchpath.startswith('/'): |
|
262 | 265 | raise error.ProgrammingError('reponame prefix of PATH_INFO does ' |
|
263 | 266 | 'not end at path delimiter: %s (%s)' % |
|
264 | 267 | (env['PATH_INFO'], reponame)) |
|
265 | 268 | |
|
266 | 269 | apppath = apppath.rstrip('/') + repoprefix |
|
267 | 270 | dispatchparts = dispatchpath.strip('/').split('/') |
|
268 | 271 | dispatchpath = '/'.join(dispatchparts) |
|
269 | 272 | |
|
270 | 273 | elif 'PATH_INFO' in env: |
|
271 | 274 | if env['PATH_INFO'].strip('/'): |
|
272 | 275 | dispatchparts = env['PATH_INFO'].strip('/').split('/') |
|
273 | 276 | dispatchpath = '/'.join(dispatchparts) |
|
274 | 277 | else: |
|
275 | 278 | dispatchparts = [] |
|
276 | 279 | dispatchpath = '' |
|
277 | 280 | else: |
|
278 | 281 | dispatchparts = [] |
|
279 | 282 | dispatchpath = None |
|
280 | 283 | |
|
281 | 284 | querystring = env.get('QUERY_STRING', '') |
|
282 | 285 | |
|
283 | 286 | # We store as a list so we have ordering information. We also store as |
|
284 | 287 | # a dict to facilitate fast lookup. |
|
285 | 288 | qsparams = multidict() |
|
286 | 289 | for k, v in util.urlreq.parseqsl(querystring, keep_blank_values=True): |
|
287 | 290 | qsparams.add(k, v) |
|
288 | 291 | |
|
289 | 292 | # HTTP_* keys contain HTTP request headers. The Headers structure should |
|
290 | 293 | # perform case normalization for us. We just rewrite underscore to dash |
|
291 | 294 | # so keys match what likely went over the wire. |
|
292 | 295 | headers = [] |
|
293 | 296 | for k, v in env.iteritems(): |
|
294 | 297 | if k.startswith('HTTP_'): |
|
295 | 298 | headers.append((k[len('HTTP_'):].replace('_', '-'), v)) |
|
296 | 299 | |
|
297 | 300 | from . import wsgiheaders # avoid cycle |
|
298 | 301 | headers = wsgiheaders.Headers(headers) |
|
299 | 302 | |
|
300 | 303 | # This is kind of a lie because the HTTP header wasn't explicitly |
|
301 | 304 | # sent. But for all intents and purposes it should be OK to lie about |
|
302 | 305 | # this, since a consumer will use either value to determine how many |
|
303 | 306 | # bytes are available to read. |
|
304 | 307 | if 'CONTENT_LENGTH' in env and 'HTTP_CONTENT_LENGTH' not in env: |
|
305 | 308 | headers['Content-Length'] = env['CONTENT_LENGTH'] |
|
306 | 309 | |
|
307 | 310 | if 'CONTENT_TYPE' in env and 'HTTP_CONTENT_TYPE' not in env: |
|
308 | 311 | headers['Content-Type'] = env['CONTENT_TYPE'] |
|
309 | 312 | |
|
310 | bodyfh = env['wsgi.input'] | |
|
311 | if 'Content-Length' in headers: | |
|
312 | bodyfh = util.cappedreader(bodyfh, int(headers['Content-Length'])) | |
|
313 | if bodyfh is None: | |
|
314 | bodyfh = env['wsgi.input'] | |
|
315 | if 'Content-Length' in headers: | |
|
316 | bodyfh = util.cappedreader(bodyfh, int(headers['Content-Length'])) | |
|
313 | 317 | |
|
314 | 318 | return parsedrequest(method=env['REQUEST_METHOD'], |
|
315 | 319 | url=fullurl, baseurl=baseurl, |
|
316 | 320 | advertisedurl=advertisedfullurl, |
|
317 | 321 | advertisedbaseurl=advertisedbaseurl, |
|
318 | 322 | urlscheme=env['wsgi.url_scheme'], |
|
319 | 323 | remoteuser=env.get('REMOTE_USER'), |
|
320 | 324 | remotehost=env.get('REMOTE_HOST'), |
|
321 | 325 | apppath=apppath, |
|
322 | 326 | dispatchparts=dispatchparts, dispatchpath=dispatchpath, |
|
323 | 327 | reponame=reponame, |
|
324 | 328 | querystring=querystring, |
|
325 | 329 | qsparams=qsparams, |
|
326 | 330 | headers=headers, |
|
327 | 331 | bodyfh=bodyfh, |
|
328 | 332 | rawenv=env) |
|
329 | 333 | |
|
330 | 334 | class offsettrackingwriter(object): |
|
331 | 335 | """A file object like object that is append only and tracks write count. |
|
332 | 336 | |
|
333 | 337 | Instances are bound to a callable. This callable is called with data |
|
334 | 338 | whenever a ``write()`` is attempted. |
|
335 | 339 | |
|
336 | 340 | Instances track the amount of written data so they can answer ``tell()`` |
|
337 | 341 | requests. |
|
338 | 342 | |
|
339 | 343 | The intent of this class is to wrap the ``write()`` function returned by |
|
340 | 344 | a WSGI ``start_response()`` function. Since ``write()`` is a callable and |
|
341 | 345 | not a file object, it doesn't implement other file object methods. |
|
342 | 346 | """ |
|
343 | 347 | def __init__(self, writefn): |
|
344 | 348 | self._write = writefn |
|
345 | 349 | self._offset = 0 |
|
346 | 350 | |
|
347 | 351 | def write(self, s): |
|
348 | 352 | res = self._write(s) |
|
349 | 353 | # Some Python objects don't report the number of bytes written. |
|
350 | 354 | if res is None: |
|
351 | 355 | self._offset += len(s) |
|
352 | 356 | else: |
|
353 | 357 | self._offset += res |
|
354 | 358 | |
|
355 | 359 | def flush(self): |
|
356 | 360 | pass |
|
357 | 361 | |
|
358 | 362 | def tell(self): |
|
359 | 363 | return self._offset |
|
360 | 364 | |
|
361 | 365 | class wsgiresponse(object): |
|
362 | 366 | """Represents a response to a WSGI request. |
|
363 | 367 | |
|
364 | 368 | A response consists of a status line, headers, and a body. |
|
365 | 369 | |
|
366 | 370 | Consumers must populate the ``status`` and ``headers`` fields and |
|
367 | 371 | make a call to a ``setbody*()`` method before the response can be |
|
368 | 372 | issued. |
|
369 | 373 | |
|
370 | 374 | When it is time to start sending the response over the wire, |
|
371 | 375 | ``sendresponse()`` is called. It handles emitting the header portion |
|
372 | 376 | of the response message. It then yields chunks of body data to be |
|
373 | 377 | written to the peer. Typically, the WSGI application itself calls |
|
374 | 378 | and returns the value from ``sendresponse()``. |
|
375 | 379 | """ |
|
376 | 380 | |
|
377 | 381 | def __init__(self, req, startresponse): |
|
378 | 382 | """Create an empty response tied to a specific request. |
|
379 | 383 | |
|
380 | 384 | ``req`` is a ``parsedrequest``. ``startresponse`` is the |
|
381 | 385 | ``start_response`` function passed to the WSGI application. |
|
382 | 386 | """ |
|
383 | 387 | self._req = req |
|
384 | 388 | self._startresponse = startresponse |
|
385 | 389 | |
|
386 | 390 | self.status = None |
|
387 | 391 | from . import wsgiheaders # avoid cycle |
|
388 | 392 | self.headers = wsgiheaders.Headers([]) |
|
389 | 393 | |
|
390 | 394 | self._bodybytes = None |
|
391 | 395 | self._bodygen = None |
|
392 | 396 | self._bodywillwrite = False |
|
393 | 397 | self._started = False |
|
394 | 398 | self._bodywritefn = None |
|
395 | 399 | |
|
396 | 400 | def _verifybody(self): |
|
397 | 401 | if (self._bodybytes is not None or self._bodygen is not None |
|
398 | 402 | or self._bodywillwrite): |
|
399 | 403 | raise error.ProgrammingError('cannot define body multiple times') |
|
400 | 404 | |
|
401 | 405 | def setbodybytes(self, b): |
|
402 | 406 | """Define the response body as static bytes. |
|
403 | 407 | |
|
404 | 408 | The empty string signals that there is no response body. |
|
405 | 409 | """ |
|
406 | 410 | self._verifybody() |
|
407 | 411 | self._bodybytes = b |
|
408 | 412 | self.headers['Content-Length'] = '%d' % len(b) |
|
409 | 413 | |
|
410 | 414 | def setbodygen(self, gen): |
|
411 | 415 | """Define the response body as a generator of bytes.""" |
|
412 | 416 | self._verifybody() |
|
413 | 417 | self._bodygen = gen |
|
414 | 418 | |
|
415 | 419 | def setbodywillwrite(self): |
|
416 | 420 | """Signal an intent to use write() to emit the response body. |
|
417 | 421 | |
|
418 | 422 | **This is the least preferred way to send a body.** |
|
419 | 423 | |
|
420 | 424 | It is preferred for WSGI applications to emit a generator of chunks |
|
421 | 425 | constituting the response body. However, some consumers can't emit |
|
422 | 426 | data this way. So, WSGI provides a way to obtain a ``write(data)`` |
|
423 | 427 | function that can be used to synchronously perform an unbuffered |
|
424 | 428 | write. |
|
425 | 429 | |
|
426 | 430 | Calling this function signals an intent to produce the body in this |
|
427 | 431 | manner. |
|
428 | 432 | """ |
|
429 | 433 | self._verifybody() |
|
430 | 434 | self._bodywillwrite = True |
|
431 | 435 | |
|
432 | 436 | def sendresponse(self): |
|
433 | 437 | """Send the generated response to the client. |
|
434 | 438 | |
|
435 | 439 | Before this is called, ``status`` must be set and one of |
|
436 | 440 | ``setbodybytes()`` or ``setbodygen()`` must be called. |
|
437 | 441 | |
|
438 | 442 | Calling this method multiple times is not allowed. |
|
439 | 443 | """ |
|
440 | 444 | if self._started: |
|
441 | 445 | raise error.ProgrammingError('sendresponse() called multiple times') |
|
442 | 446 | |
|
443 | 447 | self._started = True |
|
444 | 448 | |
|
445 | 449 | if not self.status: |
|
446 | 450 | raise error.ProgrammingError('status line not defined') |
|
447 | 451 | |
|
448 | 452 | if (self._bodybytes is None and self._bodygen is None |
|
449 | 453 | and not self._bodywillwrite): |
|
450 | 454 | raise error.ProgrammingError('response body not defined') |
|
451 | 455 | |
|
452 | 456 | # RFC 7232 Section 4.1 states that a 304 MUST generate one of |
|
453 | 457 | # {Cache-Control, Content-Location, Date, ETag, Expires, Vary} |
|
454 | 458 | # and SHOULD NOT generate other headers unless they could be used |
|
455 | 459 | # to guide cache updates. Furthermore, RFC 7230 Section 3.3.2 |
|
456 | 460 | # states that no response body can be issued. Content-Length can |
|
457 | 461 | # be sent. But if it is present, it should be the size of the response |
|
458 | 462 | # that wasn't transferred. |
|
459 | 463 | if self.status.startswith('304 '): |
|
460 | 464 | # setbodybytes('') will set C-L to 0. This doesn't conform with the |
|
461 | 465 | # spec. So remove it. |
|
462 | 466 | if self.headers.get('Content-Length') == '0': |
|
463 | 467 | del self.headers['Content-Length'] |
|
464 | 468 | |
|
465 | 469 | # Strictly speaking, this is too strict. But until it causes |
|
466 | 470 | # problems, let's be strict. |
|
467 | 471 | badheaders = {k for k in self.headers.keys() |
|
468 | 472 | if k.lower() not in ('date', 'etag', 'expires', |
|
469 | 473 | 'cache-control', |
|
470 | 474 | 'content-location', |
|
471 | 475 | 'vary')} |
|
472 | 476 | if badheaders: |
|
473 | 477 | raise error.ProgrammingError( |
|
474 | 478 | 'illegal header on 304 response: %s' % |
|
475 | 479 | ', '.join(sorted(badheaders))) |
|
476 | 480 | |
|
477 | 481 | if self._bodygen is not None or self._bodywillwrite: |
|
478 | 482 | raise error.ProgrammingError("must use setbodybytes('') with " |
|
479 | 483 | "304 responses") |
|
480 | 484 | |
|
481 | 485 | # Various HTTP clients (notably httplib) won't read the HTTP response |
|
482 | 486 | # until the HTTP request has been sent in full. If servers (us) send a |
|
483 | 487 | # response before the HTTP request has been fully sent, the connection |
|
484 | 488 | # may deadlock because neither end is reading. |
|
485 | 489 | # |
|
486 | 490 | # We work around this by "draining" the request data before |
|
487 | 491 | # sending any response in some conditions. |
|
488 | 492 | drain = False |
|
489 | 493 | close = False |
|
490 | 494 | |
|
491 | 495 | # If the client sent Expect: 100-continue, we assume it is smart enough |
|
492 | 496 | # to deal with the server sending a response before reading the request. |
|
493 | 497 | # (httplib doesn't do this.) |
|
494 | 498 | if self._req.headers.get('Expect', '').lower() == '100-continue': |
|
495 | 499 | pass |
|
496 | 500 | # Only tend to request methods that have bodies. Strictly speaking, |
|
497 | 501 | # we should sniff for a body. But this is fine for our existing |
|
498 | 502 | # WSGI applications. |
|
499 | 503 | elif self._req.method not in ('POST', 'PUT'): |
|
500 | 504 | pass |
|
501 | 505 | else: |
|
502 | 506 | # If we don't know how much data to read, there's no guarantee |
|
503 | 507 | # that we can drain the request responsibly. The WSGI |
|
504 | 508 | # specification only says that servers *should* ensure the |
|
505 | 509 | # input stream doesn't overrun the actual request. So there's |
|
506 | 510 | # no guarantee that reading until EOF won't corrupt the stream |
|
507 | 511 | # state. |
|
508 | 512 | if not isinstance(self._req.bodyfh, util.cappedreader): |
|
509 | 513 | close = True |
|
510 | 514 | else: |
|
511 | 515 | # We /could/ only drain certain HTTP response codes. But 200 and |
|
512 | 516 | # non-200 wire protocol responses both require draining. Since |
|
513 | 517 | # we have a capped reader in place for all situations where we |
|
514 | 518 | # drain, it is safe to read from that stream. We'll either do |
|
515 | 519 | # a drain or no-op if we're already at EOF. |
|
516 | 520 | drain = True |
|
517 | 521 | |
|
518 | 522 | if close: |
|
519 | 523 | self.headers['Connection'] = 'Close' |
|
520 | 524 | |
|
521 | 525 | if drain: |
|
522 | 526 | assert isinstance(self._req.bodyfh, util.cappedreader) |
|
523 | 527 | while True: |
|
524 | 528 | chunk = self._req.bodyfh.read(32768) |
|
525 | 529 | if not chunk: |
|
526 | 530 | break |
|
527 | 531 | |
|
528 | 532 | strheaders = [(pycompat.strurl(k), pycompat.strurl(v)) for |
|
529 | 533 | k, v in self.headers.items()] |
|
530 | 534 | write = self._startresponse(pycompat.sysstr(self.status), |
|
531 | 535 | strheaders) |
|
532 | 536 | |
|
533 | 537 | if self._bodybytes: |
|
534 | 538 | yield self._bodybytes |
|
535 | 539 | elif self._bodygen: |
|
536 | 540 | for chunk in self._bodygen: |
|
537 | 541 | yield chunk |
|
538 | 542 | elif self._bodywillwrite: |
|
539 | 543 | self._bodywritefn = write |
|
540 | 544 | else: |
|
541 | 545 | error.ProgrammingError('do not know how to send body') |
|
542 | 546 | |
|
543 | 547 | def getbodyfile(self): |
|
544 | 548 | """Obtain a file object like object representing the response body. |
|
545 | 549 | |
|
546 | 550 | For this to work, you must call ``setbodywillwrite()`` and then |
|
547 | 551 | ``sendresponse()`` first. ``sendresponse()`` is a generator and the |
|
548 | 552 | function won't run to completion unless the generator is advanced. The |
|
549 | 553 | generator yields no items. The easiest way to consume it is with |
|
550 | 554 | ``list(res.sendresponse())``, which should resolve to an empty list - |
|
551 | 555 | ``[]``. |
|
552 | 556 | """ |
|
553 | 557 | if not self._bodywillwrite: |
|
554 | 558 | raise error.ProgrammingError('must call setbodywillwrite() first') |
|
555 | 559 | |
|
556 | 560 | if not self._started: |
|
557 | 561 | raise error.ProgrammingError('must call sendresponse() first; did ' |
|
558 | 562 | 'you remember to consume it since it ' |
|
559 | 563 | 'is a generator?') |
|
560 | 564 | |
|
561 | 565 | assert self._bodywritefn |
|
562 | 566 | return offsettrackingwriter(self._bodywritefn) |
|
563 | 567 | |
|
564 | 568 | def wsgiapplication(app_maker): |
|
565 | 569 | '''For compatibility with old CGI scripts. A plain hgweb() or hgwebdir() |
|
566 | 570 | can and should now be used as a WSGI application.''' |
|
567 | 571 | application = app_maker() |
|
568 | 572 | def run_wsgi(env, respond): |
|
569 | 573 | return application(env, respond) |
|
570 | 574 | return run_wsgi |
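
The core of the request.py change, read as plain code rather than diff rows, is the new optional bodyfh argument to parserequestfromenv() with a fallback to the length-capped wsgi.input stream. A minimal sketch assembled from the added lines above (illustrative, not the full function):

    # New body handling in parserequestfromenv(); `env` and `headers` are the
    # locals built earlier in that function.
    if bodyfh is None:
        bodyfh = env['wsgi.input']
        if 'Content-Length' in headers:
            # Cap reads at the advertised body length so the drain loop in
            # wsgiresponse.sendresponse() cannot read past this request.
            bodyfh = util.cappedreader(bodyfh, int(headers['Content-Length']))

Reusing the caller's bodyfh matters because wsgiresponse.sendresponse() checks for util.cappedreader and drains any remaining body through that same object; re-wrapping wsgi.input on a second parse would discard the read offset already consumed, which appears to be the "state tracking" confusion the new comment refers to.
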
@@ -1,382 +1,426 b'' | |||
|
1 | 1 | #require killdaemons |
|
2 | 2 | |
|
3 | 3 | #testcases bundle1 bundle2 |
|
4 | 4 | |
|
5 | 5 | #if bundle1 |
|
6 | 6 | $ cat << EOF >> $HGRCPATH |
|
7 | 7 | > [devel] |
|
8 | 8 | > # This test is dedicated to interaction through old bundle |
|
9 | 9 | > legacy.exchange = bundle1 |
|
10 | 10 | > EOF |
|
11 | 11 | #endif |
|
12 | 12 | |
|
13 | 13 | $ hg init test |
|
14 | 14 | $ cd test |
|
15 | 15 | $ echo a > a |
|
16 | 16 | $ hg ci -Ama |
|
17 | 17 | adding a |
|
18 | 18 | $ cd .. |
|
19 | 19 | $ hg clone test test2 |
|
20 | 20 | updating to branch default |
|
21 | 21 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved |
|
22 | 22 | $ cd test2 |
|
23 | 23 | $ echo a >> a |
|
24 | 24 | $ hg ci -mb |
|
25 | 25 | $ req() { |
|
26 | 26 | > hg $1 serve -p $HGPORT -d --pid-file=hg.pid -E errors.log |
|
27 | 27 | > cat hg.pid >> $DAEMON_PIDS |
|
28 | 28 | > hg --cwd ../test2 push http://localhost:$HGPORT/ |
|
29 | 29 | > exitstatus=$? |
|
30 | 30 | > killdaemons.py |
|
31 | 31 | > echo % serve errors |
|
32 | 32 | > cat errors.log |
|
33 | 33 | > return $exitstatus |
|
34 | 34 | > } |
|
35 | 35 | $ cd ../test |
|
36 | 36 | |
|
37 | 37 | expect ssl error |
|
38 | 38 | |
|
39 | 39 | $ req |
|
40 | 40 | pushing to http://localhost:$HGPORT/ |
|
41 | 41 | searching for changes |
|
42 | 42 | abort: HTTP Error 403: ssl required |
|
43 | 43 | % serve errors |
|
44 | 44 | [255] |
|
45 | 45 | |
|
46 | 46 | expect authorization error |
|
47 | 47 | |
|
48 | 48 | $ echo '[web]' > .hg/hgrc |
|
49 | 49 | $ echo 'push_ssl = false' >> .hg/hgrc |
|
50 | 50 | $ req |
|
51 | 51 | pushing to http://localhost:$HGPORT/ |
|
52 | 52 | searching for changes |
|
53 | 53 | abort: authorization failed |
|
54 | 54 | % serve errors |
|
55 | 55 | [255] |
|
56 | 56 | |
|
57 | 57 | expect authorization error: must have authorized user |
|
58 | 58 | |
|
59 | 59 | $ echo 'allow_push = unperson' >> .hg/hgrc |
|
60 | 60 | $ req |
|
61 | 61 | pushing to http://localhost:$HGPORT/ |
|
62 | 62 | searching for changes |
|
63 | 63 | abort: authorization failed |
|
64 | 64 | % serve errors |
|
65 | 65 | [255] |
|
66 | 66 | |
|
67 | 67 | expect success |
|
68 | 68 | |
|
69 | 69 | $ cat > $TESTTMP/hook.sh <<'EOF' |
|
70 | 70 | > echo "phase-move: $HG_NODE: $HG_OLDPHASE -> $HG_PHASE" |
|
71 | 71 | > EOF |
|
72 | 72 | |
|
73 | 73 | #if bundle1 |
|
74 | 74 | $ cat >> .hg/hgrc <<EOF |
|
75 | 75 | > allow_push = * |
|
76 | 76 | > [hooks] |
|
77 | 77 | > changegroup = sh -c "printenv.py changegroup 0" |
|
78 | 78 | > pushkey = sh -c "printenv.py pushkey 0" |
|
79 | 79 | > txnclose-phase.test = sh $TESTTMP/hook.sh |
|
80 | 80 | > EOF |
|
81 | 81 | $ req "--debug --config extensions.blackbox=" |
|
82 | 82 | listening at http://*:$HGPORT/ (bound to $LOCALIP:$HGPORT) (glob) (?) |
|
83 | 83 | pushing to http://localhost:$HGPORT/ |
|
84 | 84 | searching for changes |
|
85 | 85 | remote: redirecting incoming bundle to */hg-unbundle-* (glob) |
|
86 | 86 | remote: adding changesets |
|
87 | 87 | remote: add changeset ba677d0156c1 |
|
88 | 88 | remote: adding manifests |
|
89 | 89 | remote: adding file changes |
|
90 | 90 | remote: adding a revisions |
|
91 | 91 | remote: added 1 changesets with 1 changes to 1 files |
|
92 | 92 | remote: updating the branch cache |
|
93 | 93 | remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh |
|
94 | 94 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
95 | 95 | remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh |
|
96 | 96 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
97 | 97 | remote: running hook changegroup: sh -c "printenv.py changegroup 0" |
|
98 | 98 | remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
99 | 99 | % serve errors |
|
100 | 100 | $ hg rollback |
|
101 | 101 | repository tip rolled back to revision 0 (undo serve) |
|
102 | 102 | $ req "--debug --config server.streamunbundle=True --config extensions.blackbox=" |
|
103 | 103 | listening at http://*:$HGPORT/ (bound to $LOCALIP:$HGPORT) (glob) (?) |
|
104 | 104 | pushing to http://localhost:$HGPORT/ |
|
105 | 105 | searching for changes |
|
106 | 106 | remote: adding changesets |
|
107 | 107 | remote: add changeset ba677d0156c1 |
|
108 | 108 | remote: adding manifests |
|
109 | 109 | remote: adding file changes |
|
110 | 110 | remote: adding a revisions |
|
111 | 111 | remote: added 1 changesets with 1 changes to 1 files |
|
112 | 112 | remote: updating the branch cache |
|
113 | 113 | remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh |
|
114 | 114 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
115 | 115 | remote: running hook txnclose-phase.test: sh $TESTTMP/hook.sh |
|
116 | 116 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
117 | 117 | remote: running hook changegroup: sh -c "printenv.py changegroup 0" |
|
118 | 118 | remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
119 | 119 | % serve errors |
|
120 | 120 | $ hg rollback |
|
121 | 121 | repository tip rolled back to revision 0 (undo serve) |
|
122 | 122 | #endif |
|
123 | 123 | |
|
124 | 124 | #if bundle2 |
|
125 | 125 | $ cat >> .hg/hgrc <<EOF |
|
126 | 126 | > allow_push = * |
|
127 | 127 | > [hooks] |
|
128 | 128 | > changegroup = sh -c "printenv.py changegroup 0" |
|
129 | 129 | > pushkey = sh -c "printenv.py pushkey 0" |
|
130 | 130 | > txnclose-phase.test = sh $TESTTMP/hook.sh |
|
131 | 131 | > EOF |
|
132 | 132 | $ req |
|
133 | 133 | pushing to http://localhost:$HGPORT/ |
|
134 | 134 | searching for changes |
|
135 | 135 | remote: adding changesets |
|
136 | 136 | remote: adding manifests |
|
137 | 137 | remote: adding file changes |
|
138 | 138 | remote: added 1 changesets with 1 changes to 1 files |
|
139 | 139 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
140 | 140 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
141 | 141 | remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
142 | 142 | % serve errors |
|
143 | 143 | $ hg rollback |
|
144 | 144 | repository tip rolled back to revision 0 (undo serve) |
|
145 | 145 | #endif |
|
146 | 146 | |
|
147 | 147 | expect success, server lacks the httpheader capability |
|
148 | 148 | |
|
149 | 149 | $ CAP=httpheader |
|
150 | 150 | $ . "$TESTDIR/notcapable" |
|
151 | 151 | $ req |
|
152 | 152 | pushing to http://localhost:$HGPORT/ |
|
153 | 153 | searching for changes |
|
154 | 154 | remote: adding changesets |
|
155 | 155 | remote: adding manifests |
|
156 | 156 | remote: adding file changes |
|
157 | 157 | remote: added 1 changesets with 1 changes to 1 files |
|
158 | 158 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
159 | 159 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
160 | 160 | remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !) |
|
161 | 161 | remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !) |
|
162 | 162 | % serve errors |
|
163 | 163 | $ hg rollback |
|
164 | 164 | repository tip rolled back to revision 0 (undo serve) |
|
165 | 165 | |
|
166 | 166 | expect success, server lacks the unbundlehash capability |
|
167 | 167 | |
|
168 | 168 | $ CAP=unbundlehash |
|
169 | 169 | $ . "$TESTDIR/notcapable" |
|
170 | 170 | $ req |
|
171 | 171 | pushing to http://localhost:$HGPORT/ |
|
172 | 172 | searching for changes |
|
173 | 173 | remote: adding changesets |
|
174 | 174 | remote: adding manifests |
|
175 | 175 | remote: adding file changes |
|
176 | 176 | remote: added 1 changesets with 1 changes to 1 files |
|
177 | 177 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
178 | 178 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
179 | 179 | remote: changegroup hook: HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle1 !) |
|
180 | 180 | remote: changegroup hook: HG_BUNDLE2=1 HG_HOOKNAME=changegroup HG_HOOKTYPE=changegroup HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) (bundle2 !) |
|
181 | 181 | % serve errors |
|
182 | 182 | $ hg rollback |
|
183 | 183 | repository tip rolled back to revision 0 (undo serve) |
|
184 | 184 | |
|
185 | 185 | expect success, pre-d1b16a746db6 server supports the unbundle capability, but |
|
186 | 186 | has no parameter |
|
187 | 187 | |
|
188 | 188 | $ cat <<EOF > notcapable-unbundleparam.py |
|
189 | 189 | > from mercurial import extensions, httppeer |
|
190 | 190 | > def capable(orig, self, name): |
|
191 | 191 | > if name == 'unbundle': |
|
192 | 192 | > return True |
|
193 | 193 | > return orig(self, name) |
|
194 | 194 | > def uisetup(ui): |
|
195 | 195 | > extensions.wrapfunction(httppeer.httppeer, 'capable', capable) |
|
196 | 196 | > EOF |
|
197 | 197 | $ cp $HGRCPATH $HGRCPATH.orig |
|
198 | 198 | $ cat <<EOF >> $HGRCPATH |
|
199 | 199 | > [extensions] |
|
200 | 200 | > notcapable-unbundleparam = `pwd`/notcapable-unbundleparam.py |
|
201 | 201 | > EOF |
|
202 | 202 | $ req |
|
203 | 203 | pushing to http://localhost:$HGPORT/ |
|
204 | 204 | searching for changes |
|
205 | 205 | remote: adding changesets |
|
206 | 206 | remote: adding manifests |
|
207 | 207 | remote: adding file changes |
|
208 | 208 | remote: added 1 changesets with 1 changes to 1 files |
|
209 | 209 | remote: phase-move: cb9a9f314b8b07ba71012fcdbc544b5a4d82ff5b: draft -> public |
|
210 | 210 | remote: phase-move: ba677d0156c1196c1a699fa53f390dcfc3ce3872: -> public |
|
211 | 211 | remote: changegroup hook: * (glob) |
|
212 | 212 | % serve errors |
|
213 | 213 | $ hg rollback |
|
214 | 214 | repository tip rolled back to revision 0 (undo serve) |
|
215 | 215 | $ mv $HGRCPATH.orig $HGRCPATH |
|
216 | 216 | |
|
217 | 217 | Test pushing to a publishing repository with a failing prepushkey hook |
|
218 | 218 | |
|
219 | 219 | $ cat > .hg/hgrc <<EOF |
|
220 | 220 | > [web] |
|
221 | 221 | > push_ssl = false |
|
222 | 222 | > allow_push = * |
|
223 | 223 | > [hooks] |
|
224 | 224 | > prepushkey = sh -c "printenv.py prepushkey 1" |
|
225 | 225 | > [devel] |
|
226 | 226 | > legacy.exchange=phases |
|
227 | 227 | > EOF |
|
228 | 228 | |
|
229 | 229 | #if bundle1 |
|
230 | 230 | Bundle1 works because a) phases are updated as part of changegroup application |
|
231 | 231 | and b) the client checks phases after the "unbundle" command. Since it sees no
|
232 | 232 | phase changes are necessary, it doesn't send the "pushkey" command and the |
|
233 | 233 | prepushkey hook never has to fire. |
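
(For context, a minimal Python sketch of the client-side check described above. This is an illustration only, not the actual mercurial.exchange code; the function and argument names are invented, though listkeys() and pushkey() are the real peer methods involved.)

    # Illustration only: roughly what a bundle1 client does after "unbundle".
    # `remote` is assumed to expose the peer API (listkeys/pushkey).
    def maybe_push_phases(remote, pushed_heads):
        remote_phases = remote.listkeys('phases')
        publishing = remote_phases.get('publishing', '') == 'True'
        for node in pushed_heads:
            if publishing or remote_phases.get(node) == '0':
                continue  # already public on the server: no pushkey, no prepushkey hook
            remote.pushkey('phases', node, '1', '0')  # move draft (1) -> public (0)

Against the publishing server configured here, listkeys('phases') reports publishing=True, so the loop never reaches pushkey and the failing prepushkey hook is never run.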
|
234 | 234 | |
|
235 | 235 | $ req |
|
236 | 236 | pushing to http://localhost:$HGPORT/ |
|
237 | 237 | searching for changes |
|
238 | 238 | remote: adding changesets |
|
239 | 239 | remote: adding manifests |
|
240 | 240 | remote: adding file changes |
|
241 | 241 | remote: added 1 changesets with 1 changes to 1 files |
|
242 | 242 | % serve errors |
|
243 | 243 | |
|
244 | 244 | #endif |
|
245 | 245 | |
|
246 | 246 | #if bundle2 |
|
247 | 247 | Bundle2 sends a "pushkey" bundle2 part. This runs as part of the transaction |
|
248 | 248 | and fails the entire push. |
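
(A correspondingly hedged sketch of the server side, with invented names throughout: the point is only that the pushkey part is processed inside the same transaction as the changegroup, so a failing prepushkey hook rolls back the freshly added changesets, matching the "transaction abort!" lines below.)

    import contextlib

    @contextlib.contextmanager
    def transaction(repo):
        # Minimal stand-in for a repository transaction (repo is just a list
        # here): everything staged in the "with" block either lands at the
        # end or is discarded on error.
        pending = []
        try:
            yield pending
        except Exception:
            pending.clear()          # "transaction abort!" / "rollback completed"
            raise
        repo.extend(pending)         # commit

    def apply_bundle2(repo, parts, run_hook):
        # parts: e.g. [('changegroup', 'ba677d0156c1'), ('pushkey', 'phases ...')]
        with transaction(repo) as tr:
            for kind, payload in parts:
                if kind == 'changegroup':
                    tr.append(payload)
                elif kind == 'pushkey':
                    if run_hook('prepushkey', payload) != 0:
                        raise RuntimeError('prepushkey hook exited with status 1')

Calling apply_bundle2 with a hook that returns 1 leaves `repo` untouched; with a hook returning 0, the changegroup entry is committed.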
|
249 | 249 | $ req |
|
250 | 250 | pushing to http://localhost:$HGPORT/ |
|
251 | 251 | searching for changes |
|
252 | 252 | remote: adding changesets |
|
253 | 253 | remote: adding manifests |
|
254 | 254 | remote: adding file changes |
|
255 | 255 | remote: added 1 changesets with 1 changes to 1 files |
|
256 | 256 | remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
257 | 257 | remote: pushkey-abort: prepushkey hook exited with status 1 |
|
258 | 258 | remote: transaction abort! |
|
259 | 259 | remote: rollback completed |
|
260 | 260 | abort: updating ba677d0156c1 to public failed |
|
261 | 261 | % serve errors |
|
262 | 262 | [255] |
|
263 | 263 | |
|
264 | 264 | #endif |
|
265 | 265 | |
|
266 | 266 | Now remove the failing prepushkey hook. |
|
267 | 267 | |
|
268 | 268 | $ cat >> .hg/hgrc <<EOF |
|
269 | 269 | > [hooks] |
|
270 | 270 | > prepushkey = sh -c "printenv.py prepushkey 0" |
|
271 | 271 | > EOF |
|
272 | 272 | |
|
273 | 273 | We don't need to test bundle1 because it succeeded above. |
|
274 | 274 | |
|
275 | 275 | #if bundle2 |
|
276 | 276 | $ req |
|
277 | 277 | pushing to http://localhost:$HGPORT/ |
|
278 | 278 | searching for changes |
|
279 | 279 | remote: adding changesets |
|
280 | 280 | remote: adding manifests |
|
281 | 281 | remote: adding file changes |
|
282 | 282 | remote: added 1 changesets with 1 changes to 1 files |
|
283 | 283 | remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
284 | 284 | % serve errors |
|
285 | 285 | #endif |
|
286 | 286 | |
|
287 | 287 | $ hg --config extensions.strip= strip -r 1: |
|
288 | 288 | saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg |
|
289 | 289 | |
|
290 | 290 | Now do a variant of the above, except on a non-publishing repository |
|
291 | 291 | |
|
292 | 292 | $ cat >> .hg/hgrc <<EOF |
|
293 | 293 | > [phases] |
|
294 | 294 | > publish = false |
|
295 | 295 | > [hooks] |
|
296 | 296 | > prepushkey = sh -c "printenv.py prepushkey 1" |
|
297 | 297 | > EOF |
|
298 | 298 | |
|
299 | 299 | #if bundle1 |
|
300 | 300 | $ req |
|
301 | 301 | pushing to http://localhost:$HGPORT/ |
|
302 | 302 | searching for changes |
|
303 | 303 | remote: adding changesets |
|
304 | 304 | remote: adding manifests |
|
305 | 305 | remote: adding file changes |
|
306 | 306 | remote: added 1 changesets with 1 changes to 1 files |
|
307 | 307 | remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 |
|
308 | 308 | remote: pushkey-abort: prepushkey hook exited with status 1 |
|
309 | 309 | updating ba677d0156c1 to public failed! |
|
310 | 310 | % serve errors |
|
311 | 311 | #endif |
|
312 | 312 | |
|
313 | 313 | #if bundle2 |
|
314 | 314 | $ req |
|
315 | 315 | pushing to http://localhost:$HGPORT/ |
|
316 | 316 | searching for changes |
|
317 | 317 | remote: adding changesets |
|
318 | 318 | remote: adding manifests |
|
319 | 319 | remote: adding file changes |
|
320 | 320 | remote: added 1 changesets with 1 changes to 1 files |
|
321 | 321 | remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
322 | 322 | remote: pushkey-abort: prepushkey hook exited with status 1 |
|
323 | 323 | remote: transaction abort! |
|
324 | 324 | remote: rollback completed |
|
325 | 325 | abort: updating ba677d0156c1 to public failed |
|
326 | 326 | % serve errors |
|
327 | 327 | [255] |
|
328 | 328 | #endif |
|
329 | 329 | |
|
330 | 330 | Make phase updates work
|
331 | 331 | |
|
332 | 332 | $ cat >> .hg/hgrc <<EOF |
|
333 | 333 | > [hooks] |
|
334 | 334 | > prepushkey = sh -c "printenv.py prepushkey 0" |
|
335 | 335 | > EOF |
|
336 | 336 | |
|
337 | 337 | #if bundle1 |
|
338 | 338 | $ req |
|
339 | 339 | pushing to http://localhost:$HGPORT/ |
|
340 | 340 | searching for changes |
|
341 | 341 | no changes found |
|
342 | 342 | remote: prepushkey hook: HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_OLD=1 |
|
343 | 343 | % serve errors |
|
344 | 344 | [1] |
|
345 | 345 | #endif |
|
346 | 346 | |
|
347 | 347 | #if bundle2 |
|
348 | 348 | $ req |
|
349 | 349 | pushing to http://localhost:$HGPORT/ |
|
350 | 350 | searching for changes |
|
351 | 351 | remote: adding changesets |
|
352 | 352 | remote: adding manifests |
|
353 | 353 | remote: adding file changes |
|
354 | 354 | remote: added 1 changesets with 1 changes to 1 files |
|
355 | 355 | remote: prepushkey hook: HG_BUNDLE2=1 HG_HOOKNAME=prepushkey HG_HOOKTYPE=prepushkey HG_KEY=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NAMESPACE=phases HG_NEW=0 HG_NODE=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_NODE_LAST=ba677d0156c1196c1a699fa53f390dcfc3ce3872 HG_OLD=1 HG_PENDING=$TESTTMP/test HG_PHASES_MOVED=1 HG_SOURCE=serve HG_TXNID=TXN:$ID$ HG_URL=remote:http:$LOCALIP: (glob) |
|
356 | 356 | % serve errors |
|
357 | 357 | #endif |
|
358 | 358 | |
|
359 | 359 | $ hg --config extensions.strip= strip -r 1: |
|
360 | 360 | saved backup bundle to $TESTTMP/test/.hg/strip-backup/ba677d0156c1-eea704d7-backup.hg |
|
361 | 361 | |
|
362 | 362 | #if bundle2 |
|
363 | 363 | |
|
364 | 364 | $ cat > .hg/hgrc <<EOF |
|
365 | 365 | > [web] |
|
366 | 366 | > push_ssl = false |
|
367 | 367 | > allow_push = * |
|
368 | 368 | > [experimental] |
|
369 | 369 | > httppostargs=true |
|
370 | 370 | > EOF |
|
371 | 371 | $ req |
|
372 | 372 | pushing to http://localhost:$HGPORT/ |
|
373 | 373 | searching for changes |
|
374 | 374 | remote: adding changesets |
|
375 | 375 | remote: adding manifests |
|
376 | 376 | remote: adding file changes |
|
377 | 377 | remote: added 1 changesets with 1 changes to 1 files |
|
378 | 378 | % serve errors |
|
379 | 379 | |
|
380 | 380 | #endif |
|
381 | 381 | |
|
382 | 382 | $ cd .. |
|
383 | ||
|
384 | Pushing via hgwebdir works | |
|
385 | ||
|
386 | $ hg init hgwebdir | |
|
387 | $ cd hgwebdir | |
|
388 | $ echo 0 > a | |
|
389 | $ hg -q commit -A -m initial | |
|
390 | $ cd .. | |
|
391 | ||
|
392 | $ cat > web.conf << EOF | |
|
393 | > [paths] | |
|
394 | > / = * | |
|
395 | > [web] | |
|
396 | > push_ssl = false | |
|
397 | > allow_push = * | |
|
398 | > EOF | |
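
(As an aside, a hedged sketch of what the "/ = *" paths entry above amounts to; the function below is invented for illustration and is not the hgwebdir_mod implementation. Every repository found under the server's working directory is published at the URL root, so the push goes to http://localhost:$HGPORT/hgwebdir and is handled by that repo's own hgweb application.)

    def route(repos, url_path):
        # repos: virtual path -> filesystem path, e.g. {'hgwebdir': '/srv/repos/hgwebdir'}
        # Longest prefix wins, so nested virtual paths do not shadow each other.
        for name, root in sorted(repos.items(), key=lambda kv: -len(kv[0])):
            if url_path == name or url_path.startswith(name + '/'):
                rest = url_path[len(name):].strip('/')
                return root, rest    # hand "rest" off to hgweb(root)
        return None, url_path        # no match: serve the index page instead

    # e.g. route({'hgwebdir': '/srv/repos/hgwebdir'}, 'hgwebdir/') -> ('/srv/repos/hgwebdir', '')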
|
399 | ||
|
400 | $ hg serve --web-conf web.conf -p $HGPORT -d --pid-file hg.pid | |
|
401 | $ cat hg.pid > $DAEMON_PIDS | |
|
402 | ||
|
403 | $ hg clone http://localhost:$HGPORT/hgwebdir hgwebdir-local | |
|
404 | requesting all changes | |
|
405 | adding changesets | |
|
406 | adding manifests | |
|
407 | adding file changes | |
|
408 | added 1 changesets with 1 changes to 1 files | |
|
409 | new changesets 98a3f8f02ba7 | |
|
410 | updating to branch default | |
|
411 | 1 files updated, 0 files merged, 0 files removed, 0 files unresolved | |
|
412 | $ cd hgwebdir-local | |
|
413 | $ echo commit > a | |
|
414 | $ hg commit -m 'local commit' | |
|
415 | ||
|
416 | $ hg push | |
|
417 | pushing to http://localhost:$HGPORT/hgwebdir | |
|
418 | searching for changes | |
|
419 | remote: adding changesets | |
|
420 | remote: adding manifests | |
|
421 | remote: adding file changes | |
|
422 | remote: added 1 changesets with 1 changes to 1 files | |
|
423 | ||
|
424 | $ killdaemons.py | |
|
425 | ||
|
426 | $ cd .. |