@@ -1,108 +1,108 @@
|
1 | 1 | # highlight - syntax highlighting in hgweb, based on Pygments |
|
2 | 2 | # |
|
3 | 3 | # Copyright 2008, 2009 Patrick Mezard <pmezard@gmail.com> and others |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | # |
|
8 | 8 | # The original module was split in an interface and an implementation |
|
9 | 9 | # file to defer pygments loading and speedup extension setup. |
|
10 | 10 | |
|
11 | 11 | """syntax highlighting for hgweb (requires Pygments) |
|
12 | 12 | |
|
13 | 13 | It depends on the Pygments syntax highlighting library: |
|
14 | 14 | http://pygments.org/ |
|
15 | 15 | |
|
16 | 16 | There are the following configuration options:: |
|
17 | 17 | |
|
18 | 18 | [web] |
|
19 | 19 | pygments_style = <style> (default: colorful) |
|
20 | 20 | highlightfiles = <fileset> (default: size('<5M')) |
|
21 | 21 | highlightonlymatchfilename = <bool> (default False) |
|
22 | 22 | |
|
23 | 23 | ``highlightonlymatchfilename`` will only highlight files if their type could |
|
24 | 24 | be identified by their filename. When this is not enabled (the default), |
|
25 | 25 | Pygments will try very hard to identify the file type from content and any |
|
26 | 26 | match (even matches with a low confidence score) will be used. |
|
27 | 27 | """ |
|
28 | 28 | |
|
29 | 29 | |
|
30 | 30 | from . import highlight |
|
31 | 31 | from mercurial.hgweb import ( |
|
32 | 32 | webcommands, |
|
33 | 33 | webutil, |
|
34 | 34 | ) |
|
35 | 35 | |
|
36 | 36 | from mercurial import ( |
|
37 | 37 | extensions, |
|
38 | 38 | pycompat, |
|
39 | 39 | ) |
|
40 | 40 | |
|
41 | 41 | # Note for extension authors: ONLY specify testedwith = 'ships-with-hg-core' for |
|
42 | 42 | # extensions which SHIP WITH MERCURIAL. Non-mainline extensions should |
|
43 | 43 | # be specifying the version(s) of Mercurial they are tested with, or |
|
44 | 44 | # leave the attribute unspecified. |
|
45 | 45 | testedwith = b'ships-with-hg-core' |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def pygmentize(web, field, fctx, tmpl): |
|
49 | 49 | style = web.config(b'web', b'pygments_style', b'colorful') |
|
50 | 50 | expr = web.config(b'web', b'highlightfiles', b"size('<5M')") |
|
51 | 51 | filenameonly = web.configbool(b'web', b'highlightonlymatchfilename', False) |
|
52 | 52 | |
|
53 | 53 | ctx = fctx.changectx() |
|
54 | 54 | m = ctx.matchfileset(fctx.repo().root, expr) |
|
55 | 55 | if m(fctx.path()): |
|
56 | 56 | highlight.pygmentize( |
|
57 | 57 | field, fctx, style, tmpl, guessfilenameonly=filenameonly |
|
58 | 58 | ) |
|
59 | 59 | |
|
60 | 60 | |
|
61 | 61 | def filerevision_highlight(orig, web, fctx): |
|
62 | 62 | mt = web.res.headers[b'Content-Type'] |
|
63 | 63 | # only pygmentize for mimetype containing 'html' so we both match |
|
64 | 64 | # 'text/html' and possibly 'application/xhtml+xml' in the future |
|
65 | 65 | # so that we don't have to touch the extension when the mimetype |
|
66 | 66 | # for a template changes; also hgweb optimizes the case that a |
|
67 | 67 | # raw file is sent using rawfile() and doesn't call us, so we |
|
68 | 68 | # can't clash with the file's content-type here in case we |
|
69 | 69 | # pygmentize a html file |
|
70 | 70 | if b'html' in mt: |
|
71 | 71 | pygmentize(web, b'fileline', fctx, web.tmpl) |
|
72 | 72 | |
|
73 | 73 | return orig(web, fctx) |
|
74 | 74 | |
|
75 | 75 | |
|
76 | 76 | def annotate_highlight(orig, web): |
|
77 | 77 | mt = web.res.headers[b'Content-Type'] |
|
78 | 78 | if b'html' in mt: |
|
79 | 79 | fctx = webutil.filectx(web.repo, web.req) |
|
80 | 80 | pygmentize(web, b'annotateline', fctx, web.tmpl) |
|
81 | 81 | |
|
82 | 82 | return orig(web) |
|
83 | 83 | |
|
84 | 84 | |
|
85 | 85 | def generate_css(web): |
|
86 | 86 | pg_style = web.config(b'web', b'pygments_style', b'colorful') |
|
87 | 87 | fmter = highlight.HtmlFormatter(style=pycompat.sysstr(pg_style)) |
|
88 | 88 | web.res.headers[b'Content-Type'] = b'text/css' |
|
89 | 89 | style_defs = fmter.get_style_defs(pycompat.sysstr(b'')) |
|
90 | 90 | web.res.setbodybytes( |
|
91 | 91 | b''.join( |
|
92 | 92 | [ |
|
93 | 93 | b'/* pygments_style = %s */\n\n' % pg_style, |
|
94 | 94 | pycompat.bytestr(style_defs), |
|
95 | 95 | ] |
|
96 | 96 | ) |
|
97 | 97 | ) |
|
98 | 98 | return web.res.sendresponse() |
|
99 | 99 | |
|
100 | 100 | |
|
101 | 101 | def extsetup(ui): |
|
102 | 102 | # monkeypatch in the new version |
|
103 | 103 | extensions.wrapfunction( |
|
104 | 104 | webcommands, '_filerevision', filerevision_highlight |
|
105 | 105 | ) |
|
106 | 106 | extensions.wrapfunction(webcommands, 'annotate', annotate_highlight) |
|
107 | 107 | webcommands.highlightcss = generate_css |
|
108 | | webcommands.__all__.append(b'highlightcss') |

| 108 | webcommands.__all__.append('highlightcss') |
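
The module docstring above lists the three ``[web]`` options this extension reads. As a rough sketch (the style name and fileset are illustrative choices, not values taken from this change), enabling and tuning it from an hgrc would look like:

    [extensions]
    highlight =

    [web]
    pygments_style = monokai
    highlightfiles = size('<5M')
    highlightonlymatchfilename = True

The extsetup() above also registers generate_css() as the ``highlightcss`` web command, which makes the generated stylesheet available under the ``highlightcss`` URL of the repository web interface.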
@@ -1,1890 +1,1890 @@
|
1 | 1 | """ Multicast DNS Service Discovery for Python, v0.12 |
|
2 | 2 | Copyright (C) 2003, Paul Scott-Murphy |
|
3 | 3 | |
|
4 | 4 | This module provides a framework for the use of DNS Service Discovery |
|
5 | 5 | using IP multicast. It has been tested against the JRendezvous |
|
6 | 6 | implementation from <a href="http://strangeberry.com">StrangeBerry</a>, |
|
7 | 7 | and against the mDNSResponder from Mac OS X 10.3.8. |
|
8 | 8 | |
|
9 | 9 | This library is free software; you can redistribute it and/or |
|
10 | 10 | modify it under the terms of the GNU Lesser General Public |
|
11 | 11 | License as published by the Free Software Foundation; either |
|
12 | 12 | version 2.1 of the License, or (at your option) any later version. |
|
13 | 13 | |
|
14 | 14 | This library is distributed in the hope that it will be useful, |
|
15 | 15 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
16 | 16 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
17 | 17 | Lesser General Public License for more details. |
|
18 | 18 | |
|
19 | 19 | You should have received a copy of the GNU Lesser General Public |
|
20 | 20 | License along with this library; if not, see |
|
21 | 21 | <http://www.gnu.org/licenses/>. |
|
22 | 22 | |
|
23 | 23 | """ |
|
24 | 24 | |
|
25 | 25 | """0.12 update - allow selection of binding interface |
|
26 | 26 | typo fix - Thanks A. M. Kuchlingi |
|
27 | 27 | removed all use of word 'Rendezvous' - this is an API change""" |
|
28 | 28 | |
|
29 | 29 | """0.11 update - correction to comments for addListener method |
|
30 | 30 | support for new record types seen from OS X |
|
31 | 31 | - IPv6 address |
|
32 | 32 | - hostinfo |
|
33 | 33 | ignore unknown DNS record types |
|
34 | 34 | fixes to name decoding |
|
35 | 35 | works alongside other processes using port 5353 (e.g. Mac OS X) |
|
36 | 36 | tested against Mac OS X 10.3.2's mDNSResponder |
|
37 | 37 | corrections to removal of list entries for service browser""" |
|
38 | 38 | |
|
39 | 39 | """0.10 update - Jonathon Paisley contributed these corrections: |
|
40 | 40 | always multicast replies, even when query is unicast |
|
41 | 41 | correct a pointer encoding problem |
|
42 | 42 | can now write records in any order |
|
43 | 43 | traceback shown on failure |
|
44 | 44 | better TXT record parsing |
|
45 | 45 | server is now separate from name |
|
46 | 46 | can cancel a service browser |
|
47 | 47 | |
|
48 | 48 | modified some unit tests to accommodate these changes""" |
|
49 | 49 | |
|
50 | 50 | """0.09 update - remove all records on service unregistration |
|
51 | 51 | fix DOS security problem with readName""" |
|
52 | 52 | |
|
53 | 53 | """0.08 update - changed licensing to LGPL""" |
|
54 | 54 | |
|
55 | 55 | """0.07 update - faster shutdown on engine |
|
56 | 56 | pointer encoding of outgoing names |
|
57 | 57 | ServiceBrowser now works |
|
58 | 58 | new unit tests""" |
|
59 | 59 | |
|
60 | 60 | """0.06 update - small improvements with unit tests |
|
61 | 61 | added defined exception types |
|
62 | 62 | new style objects |
|
63 | 63 | fixed hostname/interface problem |
|
64 | 64 | fixed socket timeout problem |
|
65 | 65 | fixed addServiceListener() typo bug |
|
66 | 66 | using select() for socket reads |
|
67 | 67 | tested on Debian unstable with Python 2.2.2""" |
|
68 | 68 | |
|
69 | 69 | """0.05 update - ensure case insensitivity on domain names |
|
70 | 70 | support for unicast DNS queries""" |
|
71 | 71 | |
|
72 | 72 | """0.04 update - added some unit tests |
|
73 | 73 | added __ne__ adjuncts where required |
|
74 | 74 | ensure names end in '.local.' |
|
75 | 75 | timeout on receiving socket for clean shutdown""" |
|
76 | 76 | |
|
77 | 77 | __author__ = b"Paul Scott-Murphy" |
|
78 | 78 | __email__ = b"paul at scott dash murphy dot com" |
|
79 | 79 | __version__ = b"0.12" |
|
80 | 80 | |
|
81 | 81 | import errno |
|
82 | 82 | import itertools |
|
83 | 83 | import select |
|
84 | 84 | import socket |
|
85 | 85 | import struct |
|
86 | 86 | import threading |
|
87 | 87 | import time |
|
88 | 88 | import traceback |
|
89 | 89 | |
|
90 | 90 | from mercurial import pycompat |
|
91 | 91 | |
|
92 | | __all__ = [b"Zeroconf", b"ServiceInfo", b"ServiceBrowser"] |

| 92 | __all__ = ["Zeroconf", "ServiceInfo", "ServiceBrowser"] |
|
93 | 93 | |
|
94 | 94 | # hook for threads |
|
95 | 95 | |
|
96 | 96 | globals()[b'_GLOBAL_DONE'] = 0 |
|
97 | 97 | |
|
98 | 98 | # Some timing constants |
|
99 | 99 | |
|
100 | 100 | _UNREGISTER_TIME = 125 |
|
101 | 101 | _CHECK_TIME = 175 |
|
102 | 102 | _REGISTER_TIME = 225 |
|
103 | 103 | _LISTENER_TIME = 200 |
|
104 | 104 | _BROWSER_TIME = 500 |
|
105 | 105 | |
|
106 | 106 | # Some DNS constants |
|
107 | 107 | |
|
108 | 108 | _MDNS_ADDR = r'224.0.0.251' |
|
109 | 109 | _MDNS_PORT = 5353 |
|
110 | 110 | _DNS_PORT = 53 |
|
111 | 111 | _DNS_TTL = 60 * 60 # one hour default TTL |
|
112 | 112 | |
|
113 | 113 | _MAX_MSG_TYPICAL = 1460 # unused |
|
114 | 114 | _MAX_MSG_ABSOLUTE = 8972 |
|
115 | 115 | |
|
116 | 116 | _FLAGS_QR_MASK = 0x8000 # query response mask |
|
117 | 117 | _FLAGS_QR_QUERY = 0x0000 # query |
|
118 | 118 | _FLAGS_QR_RESPONSE = 0x8000 # response |
|
119 | 119 | |
|
120 | 120 | _FLAGS_AA = 0x0400 # Authoritative answer |
|
121 | 121 | _FLAGS_TC = 0x0200 # Truncated |
|
122 | 122 | _FLAGS_RD = 0x0100 # Recursion desired |
|
123 | 123 | _FLAGS_RA = 0x8000 # Recursion available |
|
124 | 124 | |
|
125 | 125 | _FLAGS_Z = 0x0040 # Zero |
|
126 | 126 | _FLAGS_AD = 0x0020 # Authentic data |
|
127 | 127 | _FLAGS_CD = 0x0010 # Checking disabled |
|
128 | 128 | |
|
129 | 129 | _CLASS_IN = 1 |
|
130 | 130 | _CLASS_CS = 2 |
|
131 | 131 | _CLASS_CH = 3 |
|
132 | 132 | _CLASS_HS = 4 |
|
133 | 133 | _CLASS_NONE = 254 |
|
134 | 134 | _CLASS_ANY = 255 |
|
135 | 135 | _CLASS_MASK = 0x7FFF |
|
136 | 136 | _CLASS_UNIQUE = 0x8000 |
|
137 | 137 | |
|
138 | 138 | _TYPE_A = 1 |
|
139 | 139 | _TYPE_NS = 2 |
|
140 | 140 | _TYPE_MD = 3 |
|
141 | 141 | _TYPE_MF = 4 |
|
142 | 142 | _TYPE_CNAME = 5 |
|
143 | 143 | _TYPE_SOA = 6 |
|
144 | 144 | _TYPE_MB = 7 |
|
145 | 145 | _TYPE_MG = 8 |
|
146 | 146 | _TYPE_MR = 9 |
|
147 | 147 | _TYPE_NULL = 10 |
|
148 | 148 | _TYPE_WKS = 11 |
|
149 | 149 | _TYPE_PTR = 12 |
|
150 | 150 | _TYPE_HINFO = 13 |
|
151 | 151 | _TYPE_MINFO = 14 |
|
152 | 152 | _TYPE_MX = 15 |
|
153 | 153 | _TYPE_TXT = 16 |
|
154 | 154 | _TYPE_AAAA = 28 |
|
155 | 155 | _TYPE_SRV = 33 |
|
156 | 156 | _TYPE_ANY = 255 |
|
157 | 157 | |
|
158 | 158 | # Mapping constants to names |
|
159 | 159 | |
|
160 | 160 | _CLASSES = { |
|
161 | 161 | _CLASS_IN: b"in", |
|
162 | 162 | _CLASS_CS: b"cs", |
|
163 | 163 | _CLASS_CH: b"ch", |
|
164 | 164 | _CLASS_HS: b"hs", |
|
165 | 165 | _CLASS_NONE: b"none", |
|
166 | 166 | _CLASS_ANY: b"any", |
|
167 | 167 | } |
|
168 | 168 | |
|
169 | 169 | _TYPES = { |
|
170 | 170 | _TYPE_A: b"a", |
|
171 | 171 | _TYPE_NS: b"ns", |
|
172 | 172 | _TYPE_MD: b"md", |
|
173 | 173 | _TYPE_MF: b"mf", |
|
174 | 174 | _TYPE_CNAME: b"cname", |
|
175 | 175 | _TYPE_SOA: b"soa", |
|
176 | 176 | _TYPE_MB: b"mb", |
|
177 | 177 | _TYPE_MG: b"mg", |
|
178 | 178 | _TYPE_MR: b"mr", |
|
179 | 179 | _TYPE_NULL: b"null", |
|
180 | 180 | _TYPE_WKS: b"wks", |
|
181 | 181 | _TYPE_PTR: b"ptr", |
|
182 | 182 | _TYPE_HINFO: b"hinfo", |
|
183 | 183 | _TYPE_MINFO: b"minfo", |
|
184 | 184 | _TYPE_MX: b"mx", |
|
185 | 185 | _TYPE_TXT: b"txt", |
|
186 | 186 | _TYPE_AAAA: b"quada", |
|
187 | 187 | _TYPE_SRV: b"srv", |
|
188 | 188 | _TYPE_ANY: b"any", |
|
189 | 189 | } |
|
190 | 190 | |
|
191 | 191 | # utility functions |
|
192 | 192 | |
|
193 | 193 | |
|
194 | 194 | def currentTimeMillis(): |
|
195 | 195 | """Current system time in milliseconds""" |
|
196 | 196 | return time.time() * 1000 |
|
197 | 197 | |
|
198 | 198 | |
|
199 | 199 | # Exceptions |
|
200 | 200 | |
|
201 | 201 | |
|
202 | 202 | class NonLocalNameException(Exception): |
|
203 | 203 | pass |
|
204 | 204 | |
|
205 | 205 | |
|
206 | 206 | class NonUniqueNameException(Exception): |
|
207 | 207 | pass |
|
208 | 208 | |
|
209 | 209 | |
|
210 | 210 | class NamePartTooLongException(Exception): |
|
211 | 211 | pass |
|
212 | 212 | |
|
213 | 213 | |
|
214 | 214 | class AbstractMethodException(Exception): |
|
215 | 215 | pass |
|
216 | 216 | |
|
217 | 217 | |
|
218 | 218 | class BadTypeInNameException(Exception): |
|
219 | 219 | pass |
|
220 | 220 | |
|
221 | 221 | |
|
222 | 222 | class BadDomainName(Exception): |
|
223 | 223 | def __init__(self, pos): |
|
224 | 224 | Exception.__init__(self, b"at position %s" % pos) |
|
225 | 225 | |
|
226 | 226 | |
|
227 | 227 | class BadDomainNameCircular(BadDomainName): |
|
228 | 228 | pass |
|
229 | 229 | |
|
230 | 230 | |
|
231 | 231 | # implementation classes |
|
232 | 232 | |
|
233 | 233 | |
|
234 | 234 | class DNSEntry: |
|
235 | 235 | """A DNS entry""" |
|
236 | 236 | |
|
237 | 237 | def __init__(self, name, type, clazz): |
|
238 | 238 | self.key = name.lower() |
|
239 | 239 | self.name = name |
|
240 | 240 | self.type = type |
|
241 | 241 | self.clazz = clazz & _CLASS_MASK |
|
242 | 242 | self.unique = (clazz & _CLASS_UNIQUE) != 0 |
|
243 | 243 | |
|
244 | 244 | def __eq__(self, other): |
|
245 | 245 | """Equality test on name, type, and class""" |
|
246 | 246 | if isinstance(other, DNSEntry): |
|
247 | 247 | return ( |
|
248 | 248 | self.name == other.name |
|
249 | 249 | and self.type == other.type |
|
250 | 250 | and self.clazz == other.clazz |
|
251 | 251 | ) |
|
252 | 252 | return 0 |
|
253 | 253 | |
|
254 | 254 | def __ne__(self, other): |
|
255 | 255 | """Non-equality test""" |
|
256 | 256 | return not self.__eq__(other) |
|
257 | 257 | |
|
258 | 258 | def getClazz(self, clazz): |
|
259 | 259 | """Class accessor""" |
|
260 | 260 | try: |
|
261 | 261 | return _CLASSES[clazz] |
|
262 | 262 | except KeyError: |
|
263 | 263 | return b"?(%s)" % clazz |
|
264 | 264 | |
|
265 | 265 | def getType(self, type): |
|
266 | 266 | """Type accessor""" |
|
267 | 267 | try: |
|
268 | 268 | return _TYPES[type] |
|
269 | 269 | except KeyError: |
|
270 | 270 | return b"?(%s)" % type |
|
271 | 271 | |
|
272 | 272 | def toString(self, hdr, other): |
|
273 | 273 | """String representation with additional information""" |
|
274 | 274 | result = b"%s[%s,%s" % ( |
|
275 | 275 | hdr, |
|
276 | 276 | self.getType(self.type), |
|
277 | 277 | self.getClazz(self.clazz), |
|
278 | 278 | ) |
|
279 | 279 | if self.unique: |
|
280 | 280 | result += b"-unique," |
|
281 | 281 | else: |
|
282 | 282 | result += b"," |
|
283 | 283 | result += self.name |
|
284 | 284 | if other is not None: |
|
285 | 285 | result += b",%s]" % other |
|
286 | 286 | else: |
|
287 | 287 | result += b"]" |
|
288 | 288 | return result |
|
289 | 289 | |
|
290 | 290 | |
|
291 | 291 | class DNSQuestion(DNSEntry): |
|
292 | 292 | """A DNS question entry""" |
|
293 | 293 | |
|
294 | 294 | def __init__(self, name, type, clazz): |
|
295 | 295 | if isinstance(name, str): |
|
296 | 296 | name = name.encode('ascii') |
|
297 | 297 | if not name.endswith(b".local."): |
|
298 | 298 | raise NonLocalNameException(name) |
|
299 | 299 | DNSEntry.__init__(self, name, type, clazz) |
|
300 | 300 | |
|
301 | 301 | def answeredBy(self, rec): |
|
302 | 302 | """Returns true if the question is answered by the record""" |
|
303 | 303 | return ( |
|
304 | 304 | self.clazz == rec.clazz |
|
305 | 305 | and (self.type == rec.type or self.type == _TYPE_ANY) |
|
306 | 306 | and self.name == rec.name |
|
307 | 307 | ) |
|
308 | 308 | |
|
309 | 309 | def __repr__(self): |
|
310 | 310 | """String representation""" |
|
311 | 311 | return DNSEntry.toString(self, b"question", None) |
|
312 | 312 | |
|
313 | 313 | |
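
A usage sketch, staying inside this module's namespace (the service type is purely illustrative): DNSQuestion refuses names that do not end in b'.local.', and a browser-style PTR query is assembled with DNSOutgoing, defined further down.

    # Hypothetical PTR question for an illustrative service type; names must
    # end in b'.local.' or NonLocalNameException is raised.
    q = DNSQuestion(b'_example._tcp.local.', _TYPE_PTR, _CLASS_IN)
    out = DNSOutgoing(_FLAGS_QR_QUERY)
    out.addQuestion(q)          # serialized later by out.packet()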
|
314 | 314 | class DNSRecord(DNSEntry): |
|
315 | 315 | """A DNS record - like a DNS entry, but has a TTL""" |
|
316 | 316 | |
|
317 | 317 | def __init__(self, name, type, clazz, ttl): |
|
318 | 318 | DNSEntry.__init__(self, name, type, clazz) |
|
319 | 319 | self.ttl = ttl |
|
320 | 320 | self.created = currentTimeMillis() |
|
321 | 321 | |
|
322 | 322 | def __eq__(self, other): |
|
323 | 323 | """Tests equality as per DNSRecord""" |
|
324 | 324 | if isinstance(other, DNSRecord): |
|
325 | 325 | return DNSEntry.__eq__(self, other) |
|
326 | 326 | return 0 |
|
327 | 327 | |
|
328 | 328 | def suppressedBy(self, msg): |
|
329 | 329 | """Returns true if any answer in a message can suffice for the |
|
330 | 330 | information held in this record.""" |
|
331 | 331 | for record in msg.answers: |
|
332 | 332 | if self.suppressedByAnswer(record): |
|
333 | 333 | return 1 |
|
334 | 334 | return 0 |
|
335 | 335 | |
|
336 | 336 | def suppressedByAnswer(self, other): |
|
337 | 337 | """Returns true if another record has same name, type and class, |
|
338 | 338 | and if its TTL is at least half of this record's.""" |
|
339 | 339 | if self == other and other.ttl > (self.ttl / 2): |
|
340 | 340 | return 1 |
|
341 | 341 | return 0 |
|
342 | 342 | |
|
343 | 343 | def getExpirationTime(self, percent): |
|
344 | 344 | """Returns the time at which this record will have expired |
|
345 | 345 | by a certain percentage.""" |
|
346 | 346 | return self.created + (percent * self.ttl * 10) |
|
347 | 347 | |
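
A note on the arithmetic in getExpirationTime(): created is in milliseconds (from currentTimeMillis()) while ttl is in seconds, so percent * ttl * 10 is just the TTL converted to milliseconds and scaled by percent/100. A small sketch with made-up numbers:

    # Illustrative numbers only.
    created, ttl, percent = 1000000.0, 60, 50
    assert percent * ttl * 10 == ttl * 1000 * percent // 100 == 30000
    halfway = created + percent * ttl * 10   # the threshold isStale() compares against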
|
348 | 348 | def getRemainingTTL(self, now): |
|
349 | 349 | """Returns the remaining TTL in seconds.""" |
|
350 | 350 | return max(0, (self.getExpirationTime(100) - now) / 1000) |
|
351 | 351 | |
|
352 | 352 | def isExpired(self, now): |
|
353 | 353 | """Returns true if this record has expired.""" |
|
354 | 354 | return self.getExpirationTime(100) <= now |
|
355 | 355 | |
|
356 | 356 | def isStale(self, now): |
|
357 | 357 | """Returns true if this record is at least half way expired.""" |
|
358 | 358 | return self.getExpirationTime(50) <= now |
|
359 | 359 | |
|
360 | 360 | def resetTTL(self, other): |
|
361 | 361 | """Sets this record's TTL and created time to that of |
|
362 | 362 | another record.""" |
|
363 | 363 | self.created = other.created |
|
364 | 364 | self.ttl = other.ttl |
|
365 | 365 | |
|
366 | 366 | def write(self, out): |
|
367 | 367 | """Abstract method""" |
|
368 | 368 | raise AbstractMethodException |
|
369 | 369 | |
|
370 | 370 | def toString(self, other): |
|
371 | 371 | """String representation with additional information""" |
|
372 | 372 | arg = b"%s/%s,%s" % ( |
|
373 | 373 | self.ttl, |
|
374 | 374 | self.getRemainingTTL(currentTimeMillis()), |
|
375 | 375 | other, |
|
376 | 376 | ) |
|
377 | 377 | return DNSEntry.toString(self, b"record", arg) |
|
378 | 378 | |
|
379 | 379 | |
|
380 | 380 | class DNSAddress(DNSRecord): |
|
381 | 381 | """A DNS address record""" |
|
382 | 382 | |
|
383 | 383 | def __init__(self, name, type, clazz, ttl, address): |
|
384 | 384 | DNSRecord.__init__(self, name, type, clazz, ttl) |
|
385 | 385 | self.address = address |
|
386 | 386 | |
|
387 | 387 | def write(self, out): |
|
388 | 388 | """Used in constructing an outgoing packet""" |
|
389 | 389 | out.writeString(self.address, len(self.address)) |
|
390 | 390 | |
|
391 | 391 | def __eq__(self, other): |
|
392 | 392 | """Tests equality on address""" |
|
393 | 393 | if isinstance(other, DNSAddress): |
|
394 | 394 | return self.address == other.address |
|
395 | 395 | return 0 |
|
396 | 396 | |
|
397 | 397 | def __repr__(self): |
|
398 | 398 | """String representation""" |
|
399 | 399 | try: |
|
400 | 400 | return socket.inet_ntoa(self.address) |
|
401 | 401 | except Exception: |
|
402 | 402 | return self.address |
|
403 | 403 | |
|
404 | 404 | |
|
405 | 405 | class DNSHinfo(DNSRecord): |
|
406 | 406 | """A DNS host information record""" |
|
407 | 407 | |
|
408 | 408 | def __init__(self, name, type, clazz, ttl, cpu, os): |
|
409 | 409 | DNSRecord.__init__(self, name, type, clazz, ttl) |
|
410 | 410 | self.cpu = cpu |
|
411 | 411 | self.os = os |
|
412 | 412 | |
|
413 | 413 | def write(self, out): |
|
414 | 414 | """Used in constructing an outgoing packet""" |
|
415 | 415 | out.writeString(self.cpu, len(self.cpu)) |
|
416 | 416 | out.writeString(self.os, len(self.os)) |
|
417 | 417 | |
|
418 | 418 | def __eq__(self, other): |
|
419 | 419 | """Tests equality on cpu and os""" |
|
420 | 420 | if isinstance(other, DNSHinfo): |
|
421 | 421 | return self.cpu == other.cpu and self.os == other.os |
|
422 | 422 | return 0 |
|
423 | 423 | |
|
424 | 424 | def __repr__(self): |
|
425 | 425 | """String representation""" |
|
426 | 426 | return self.cpu + b" " + self.os |
|
427 | 427 | |
|
428 | 428 | |
|
429 | 429 | class DNSPointer(DNSRecord): |
|
430 | 430 | """A DNS pointer record""" |
|
431 | 431 | |
|
432 | 432 | def __init__(self, name, type, clazz, ttl, alias): |
|
433 | 433 | DNSRecord.__init__(self, name, type, clazz, ttl) |
|
434 | 434 | self.alias = alias |
|
435 | 435 | |
|
436 | 436 | def write(self, out): |
|
437 | 437 | """Used in constructing an outgoing packet""" |
|
438 | 438 | out.writeName(self.alias) |
|
439 | 439 | |
|
440 | 440 | def __eq__(self, other): |
|
441 | 441 | """Tests equality on alias""" |
|
442 | 442 | if isinstance(other, DNSPointer): |
|
443 | 443 | return self.alias == other.alias |
|
444 | 444 | return 0 |
|
445 | 445 | |
|
446 | 446 | def __repr__(self): |
|
447 | 447 | """String representation""" |
|
448 | 448 | return self.toString(self.alias) |
|
449 | 449 | |
|
450 | 450 | |
|
451 | 451 | class DNSText(DNSRecord): |
|
452 | 452 | """A DNS text record""" |
|
453 | 453 | |
|
454 | 454 | def __init__(self, name, type, clazz, ttl, text): |
|
455 | 455 | DNSRecord.__init__(self, name, type, clazz, ttl) |
|
456 | 456 | self.text = text |
|
457 | 457 | |
|
458 | 458 | def write(self, out): |
|
459 | 459 | """Used in constructing an outgoing packet""" |
|
460 | 460 | out.writeString(self.text, len(self.text)) |
|
461 | 461 | |
|
462 | 462 | def __eq__(self, other): |
|
463 | 463 | """Tests equality on text""" |
|
464 | 464 | if isinstance(other, DNSText): |
|
465 | 465 | return self.text == other.text |
|
466 | 466 | return 0 |
|
467 | 467 | |
|
468 | 468 | def __repr__(self): |
|
469 | 469 | """String representation""" |
|
470 | 470 | if len(self.text) > 10: |
|
471 | 471 | return self.toString(self.text[:7] + b"...") |
|
472 | 472 | else: |
|
473 | 473 | return self.toString(self.text) |
|
474 | 474 | |
|
475 | 475 | |
|
476 | 476 | class DNSService(DNSRecord): |
|
477 | 477 | """A DNS service record""" |
|
478 | 478 | |
|
479 | 479 | def __init__(self, name, type, clazz, ttl, priority, weight, port, server): |
|
480 | 480 | DNSRecord.__init__(self, name, type, clazz, ttl) |
|
481 | 481 | self.priority = priority |
|
482 | 482 | self.weight = weight |
|
483 | 483 | self.port = port |
|
484 | 484 | self.server = server |
|
485 | 485 | |
|
486 | 486 | def write(self, out): |
|
487 | 487 | """Used in constructing an outgoing packet""" |
|
488 | 488 | out.writeShort(self.priority) |
|
489 | 489 | out.writeShort(self.weight) |
|
490 | 490 | out.writeShort(self.port) |
|
491 | 491 | out.writeName(self.server) |
|
492 | 492 | |
|
493 | 493 | def __eq__(self, other): |
|
494 | 494 | """Tests equality on priority, weight, port and server""" |
|
495 | 495 | if isinstance(other, DNSService): |
|
496 | 496 | return ( |
|
497 | 497 | self.priority == other.priority |
|
498 | 498 | and self.weight == other.weight |
|
499 | 499 | and self.port == other.port |
|
500 | 500 | and self.server == other.server |
|
501 | 501 | ) |
|
502 | 502 | return 0 |
|
503 | 503 | |
|
504 | 504 | def __repr__(self): |
|
505 | 505 | """String representation""" |
|
506 | 506 | return self.toString(b"%s:%s" % (self.server, self.port)) |
|
507 | 507 | |
|
508 | 508 | |
|
509 | 509 | class DNSIncoming: |
|
510 | 510 | """Object representation of an incoming DNS packet""" |
|
511 | 511 | |
|
512 | 512 | def __init__(self, data): |
|
513 | 513 | """Constructor from string holding bytes of packet""" |
|
514 | 514 | self.offset = 0 |
|
515 | 515 | self.data = data |
|
516 | 516 | self.questions = [] |
|
517 | 517 | self.answers = [] |
|
518 | 518 | self.numquestions = 0 |
|
519 | 519 | self.numanswers = 0 |
|
520 | 520 | self.numauthorities = 0 |
|
521 | 521 | self.numadditionals = 0 |
|
522 | 522 | |
|
523 | 523 | self.readHeader() |
|
524 | 524 | self.readQuestions() |
|
525 | 525 | self.readOthers() |
|
526 | 526 | |
|
527 | 527 | def readHeader(self): |
|
528 | 528 | """Reads header portion of packet""" |
|
529 | 529 | format = b'!HHHHHH' |
|
530 | 530 | length = struct.calcsize(format) |
|
531 | 531 | info = struct.unpack( |
|
532 | 532 | format, self.data[self.offset : self.offset + length] |
|
533 | 533 | ) |
|
534 | 534 | self.offset += length |
|
535 | 535 | |
|
536 | 536 | self.id = info[0] |
|
537 | 537 | self.flags = info[1] |
|
538 | 538 | self.numquestions = info[2] |
|
539 | 539 | self.numanswers = info[3] |
|
540 | 540 | self.numauthorities = info[4] |
|
541 | 541 | self.numadditionals = info[5] |
|
542 | 542 | |
|
543 | 543 | def readQuestions(self): |
|
544 | 544 | """Reads questions section of packet""" |
|
545 | 545 | format = b'!HH' |
|
546 | 546 | length = struct.calcsize(format) |
|
547 | 547 | for i in range(0, self.numquestions): |
|
548 | 548 | name = self.readName() |
|
549 | 549 | info = struct.unpack( |
|
550 | 550 | format, self.data[self.offset : self.offset + length] |
|
551 | 551 | ) |
|
552 | 552 | self.offset += length |
|
553 | 553 | |
|
554 | 554 | try: |
|
555 | 555 | question = DNSQuestion(name, info[0], info[1]) |
|
556 | 556 | self.questions.append(question) |
|
557 | 557 | except NonLocalNameException: |
|
558 | 558 | pass |
|
559 | 559 | |
|
560 | 560 | def readInt(self): |
|
561 | 561 | """Reads an integer from the packet""" |
|
562 | 562 | format = b'!I' |
|
563 | 563 | length = struct.calcsize(format) |
|
564 | 564 | info = struct.unpack( |
|
565 | 565 | format, self.data[self.offset : self.offset + length] |
|
566 | 566 | ) |
|
567 | 567 | self.offset += length |
|
568 | 568 | return info[0] |
|
569 | 569 | |
|
570 | 570 | def readCharacterString(self): |
|
571 | 571 | """Reads a character string from the packet""" |
|
572 | 572 | length = ord(self.data[self.offset]) |
|
573 | 573 | self.offset += 1 |
|
574 | 574 | return self.readString(length) |
|
575 | 575 | |
|
576 | 576 | def readString(self, len): |
|
577 | 577 | """Reads a string of a given length from the packet""" |
|
578 | 578 | format = b'!%ds' % len |
|
579 | 579 | length = struct.calcsize(format) |
|
580 | 580 | info = struct.unpack( |
|
581 | 581 | format, self.data[self.offset : self.offset + length] |
|
582 | 582 | ) |
|
583 | 583 | self.offset += length |
|
584 | 584 | return info[0] |
|
585 | 585 | |
|
586 | 586 | def readUnsignedShort(self): |
|
587 | 587 | """Reads an unsigned short from the packet""" |
|
588 | 588 | format = b'!H' |
|
589 | 589 | length = struct.calcsize(format) |
|
590 | 590 | info = struct.unpack( |
|
591 | 591 | format, self.data[self.offset : self.offset + length] |
|
592 | 592 | ) |
|
593 | 593 | self.offset += length |
|
594 | 594 | return info[0] |
|
595 | 595 | |
|
596 | 596 | def readOthers(self): |
|
597 | 597 | """Reads answers, authorities and additionals section of the packet""" |
|
598 | 598 | format = b'!HHiH' |
|
599 | 599 | length = struct.calcsize(format) |
|
600 | 600 | n = self.numanswers + self.numauthorities + self.numadditionals |
|
601 | 601 | for i in range(0, n): |
|
602 | 602 | domain = self.readName() |
|
603 | 603 | info = struct.unpack( |
|
604 | 604 | format, self.data[self.offset : self.offset + length] |
|
605 | 605 | ) |
|
606 | 606 | self.offset += length |
|
607 | 607 | |
|
608 | 608 | rec = None |
|
609 | 609 | if info[0] == _TYPE_A: |
|
610 | 610 | rec = DNSAddress( |
|
611 | 611 | domain, info[0], info[1], info[2], self.readString(4) |
|
612 | 612 | ) |
|
613 | 613 | elif info[0] == _TYPE_CNAME or info[0] == _TYPE_PTR: |
|
614 | 614 | rec = DNSPointer( |
|
615 | 615 | domain, info[0], info[1], info[2], self.readName() |
|
616 | 616 | ) |
|
617 | 617 | elif info[0] == _TYPE_TXT: |
|
618 | 618 | rec = DNSText( |
|
619 | 619 | domain, info[0], info[1], info[2], self.readString(info[3]) |
|
620 | 620 | ) |
|
621 | 621 | elif info[0] == _TYPE_SRV: |
|
622 | 622 | rec = DNSService( |
|
623 | 623 | domain, |
|
624 | 624 | info[0], |
|
625 | 625 | info[1], |
|
626 | 626 | info[2], |
|
627 | 627 | self.readUnsignedShort(), |
|
628 | 628 | self.readUnsignedShort(), |
|
629 | 629 | self.readUnsignedShort(), |
|
630 | 630 | self.readName(), |
|
631 | 631 | ) |
|
632 | 632 | elif info[0] == _TYPE_HINFO: |
|
633 | 633 | rec = DNSHinfo( |
|
634 | 634 | domain, |
|
635 | 635 | info[0], |
|
636 | 636 | info[1], |
|
637 | 637 | info[2], |
|
638 | 638 | self.readCharacterString(), |
|
639 | 639 | self.readCharacterString(), |
|
640 | 640 | ) |
|
641 | 641 | elif info[0] == _TYPE_AAAA: |
|
642 | 642 | rec = DNSAddress( |
|
643 | 643 | domain, info[0], info[1], info[2], self.readString(16) |
|
644 | 644 | ) |
|
645 | 645 | else: |
|
646 | 646 | # Try to ignore types we don't know about |
|
647 | 647 | # this may mean the rest of the name is |
|
648 | 648 | # unable to be parsed, and may show errors |
|
649 | 649 | # so this is left for debugging. New types |
|
650 | 650 | # encountered need to be parsed properly. |
|
651 | 651 | # |
|
652 | 652 | # print "UNKNOWN TYPE = " + str(info[0]) |
|
653 | 653 | # raise BadTypeInNameException |
|
654 | 654 | self.offset += info[3] |
|
655 | 655 | |
|
656 | 656 | if rec is not None: |
|
657 | 657 | self.answers.append(rec) |
|
658 | 658 | |
|
659 | 659 | def isQuery(self): |
|
660 | 660 | """Returns true if this is a query""" |
|
661 | 661 | return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_QUERY |
|
662 | 662 | |
|
663 | 663 | def isResponse(self): |
|
664 | 664 | """Returns true if this is a response""" |
|
665 | 665 | return (self.flags & _FLAGS_QR_MASK) == _FLAGS_QR_RESPONSE |
|
666 | 666 | |
|
667 | 667 | def readUTF(self, offset, len): |
|
668 | 668 | """Reads a UTF-8 string of a given length from the packet""" |
|
669 | 669 | return self.data[offset : offset + len].decode('utf-8') |
|
670 | 670 | |
|
671 | 671 | def readName(self): |
|
672 | 672 | """Reads a domain name from the packet""" |
|
673 | 673 | result = r'' |
|
674 | 674 | off = self.offset |
|
675 | 675 | next = -1 |
|
676 | 676 | first = off |
|
677 | 677 | |
|
678 | 678 | while True: |
|
679 | 679 | len = ord(self.data[off : off + 1]) |
|
680 | 680 | off += 1 |
|
681 | 681 | if len == 0: |
|
682 | 682 | break |
|
683 | 683 | t = len & 0xC0 |
|
684 | 684 | if t == 0x00: |
|
685 | 685 | result = ''.join((result, self.readUTF(off, len) + '.')) |
|
686 | 686 | off += len |
|
687 | 687 | elif t == 0xC0: |
|
688 | 688 | if next < 0: |
|
689 | 689 | next = off + 1 |
|
690 | 690 | off = ((len & 0x3F) << 8) | ord(self.data[off : off + 1]) |
|
691 | 691 | if off >= first: |
|
692 | 692 | raise BadDomainNameCircular(off) |
|
693 | 693 | first = off |
|
694 | 694 | else: |
|
695 | 695 | raise BadDomainName(off) |
|
696 | 696 | |
|
697 | 697 | if next >= 0: |
|
698 | 698 | self.offset = next |
|
699 | 699 | else: |
|
700 | 700 | self.offset = off |
|
701 | 701 | |
|
702 | 702 | return result |
|
703 | 703 | |
|
704 | 704 | |
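
readName() above handles DNS name compression (RFC 1035, section 4.1.4): when the two high bits of a length byte are set (0xC0), the byte is not a label length but the start of a 14-bit offset to where the name continues earlier in the packet; writeName() in the next class emits such pointers for names it has already written. A toy pointer with an invented offset decodes like this:

    # Illustrative only: a two-byte compression pointer to offset 12.
    hi, lo = 0xC0, 0x0C
    offset = ((hi & 0x3F) << 8) | lo
    assert offset == 12    # the same computation readName() performs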
|
705 | 705 | class DNSOutgoing: |
|
706 | 706 | """Object representation of an outgoing packet""" |
|
707 | 707 | |
|
708 | 708 | def __init__(self, flags, multicast=1): |
|
709 | 709 | self.finished = 0 |
|
710 | 710 | self.id = 0 |
|
711 | 711 | self.multicast = multicast |
|
712 | 712 | self.flags = flags |
|
713 | 713 | self.names = {} |
|
714 | 714 | self.data = [] |
|
715 | 715 | self.size = 12 |
|
716 | 716 | |
|
717 | 717 | self.questions = [] |
|
718 | 718 | self.answers = [] |
|
719 | 719 | self.authorities = [] |
|
720 | 720 | self.additionals = [] |
|
721 | 721 | |
|
722 | 722 | def addQuestion(self, record): |
|
723 | 723 | """Adds a question""" |
|
724 | 724 | self.questions.append(record) |
|
725 | 725 | |
|
726 | 726 | def addAnswer(self, inp, record): |
|
727 | 727 | """Adds an answer""" |
|
728 | 728 | if not record.suppressedBy(inp): |
|
729 | 729 | self.addAnswerAtTime(record, 0) |
|
730 | 730 | |
|
731 | 731 | def addAnswerAtTime(self, record, now): |
|
732 | 732 | """Adds an answer if it does not expire by a certain time""" |
|
733 | 733 | if record is not None: |
|
734 | 734 | if now == 0 or not record.isExpired(now): |
|
735 | 735 | self.answers.append((record, now)) |
|
736 | 736 | |
|
737 | 737 | def addAuthoritativeAnswer(self, record): |
|
738 | 738 | """Adds an authoritative answer""" |
|
739 | 739 | self.authorities.append(record) |
|
740 | 740 | |
|
741 | 741 | def addAdditionalAnswer(self, record): |
|
742 | 742 | """Adds an additional answer""" |
|
743 | 743 | self.additionals.append(record) |
|
744 | 744 | |
|
745 | 745 | def writeByte(self, value): |
|
746 | 746 | """Writes a single byte to the packet""" |
|
747 | 747 | format = b'!c' |
|
748 | 748 | self.data.append(struct.pack(format, chr(value))) |
|
749 | 749 | self.size += 1 |
|
750 | 750 | |
|
751 | 751 | def insertShort(self, index, value): |
|
752 | 752 | """Inserts an unsigned short in a certain position in the packet""" |
|
753 | 753 | format = b'!H' |
|
754 | 754 | self.data.insert(index, struct.pack(format, value)) |
|
755 | 755 | self.size += 2 |
|
756 | 756 | |
|
757 | 757 | def writeShort(self, value): |
|
758 | 758 | """Writes an unsigned short to the packet""" |
|
759 | 759 | format = b'!H' |
|
760 | 760 | self.data.append(struct.pack(format, value)) |
|
761 | 761 | self.size += 2 |
|
762 | 762 | |
|
763 | 763 | def writeInt(self, value): |
|
764 | 764 | """Writes an unsigned integer to the packet""" |
|
765 | 765 | format = b'!I' |
|
766 | 766 | self.data.append(struct.pack(format, int(value))) |
|
767 | 767 | self.size += 4 |
|
768 | 768 | |
|
769 | 769 | def writeString(self, value, length): |
|
770 | 770 | """Writes a string to the packet""" |
|
771 | 771 | format = '!' + str(length) + 's' |
|
772 | 772 | self.data.append(struct.pack(format, value)) |
|
773 | 773 | self.size += length |
|
774 | 774 | |
|
775 | 775 | def writeUTF(self, s): |
|
776 | 776 | """Writes a UTF-8 string of a given length to the packet""" |
|
777 | 777 | utfstr = s.encode('utf-8') |
|
778 | 778 | length = len(utfstr) |
|
779 | 779 | if length > 64: |
|
780 | 780 | raise NamePartTooLongException |
|
781 | 781 | self.writeByte(length) |
|
782 | 782 | self.writeString(utfstr, length) |
|
783 | 783 | |
|
784 | 784 | def writeName(self, name): |
|
785 | 785 | """Writes a domain name to the packet""" |
|
786 | 786 | |
|
787 | 787 | try: |
|
788 | 788 | # Find existing instance of this name in packet |
|
789 | 789 | # |
|
790 | 790 | index = self.names[name] |
|
791 | 791 | except KeyError: |
|
792 | 792 | # No record of this name already, so write it |
|
793 | 793 | # out as normal, recording the location of the name |
|
794 | 794 | # for future pointers to it. |
|
795 | 795 | # |
|
796 | 796 | self.names[name] = self.size |
|
797 | 797 | parts = name.split(b'.') |
|
798 | 798 | if parts[-1] == b'': |
|
799 | 799 | parts = parts[:-1] |
|
800 | 800 | for part in parts: |
|
801 | 801 | self.writeUTF(part) |
|
802 | 802 | self.writeByte(0) |
|
803 | 803 | return |
|
804 | 804 | |
|
805 | 805 | # An index was found, so write a pointer to it |
|
806 | 806 | # |
|
807 | 807 | self.writeByte((index >> 8) | 0xC0) |
|
808 | 808 | self.writeByte(index) |
|
809 | 809 | |
|
810 | 810 | def writeQuestion(self, question): |
|
811 | 811 | """Writes a question to the packet""" |
|
812 | 812 | self.writeName(question.name) |
|
813 | 813 | self.writeShort(question.type) |
|
814 | 814 | self.writeShort(question.clazz) |
|
815 | 815 | |
|
816 | 816 | def writeRecord(self, record, now): |
|
817 | 817 | """Writes a record (answer, authoritative answer, additional) to |
|
818 | 818 | the packet""" |
|
819 | 819 | self.writeName(record.name) |
|
820 | 820 | self.writeShort(record.type) |
|
821 | 821 | if record.unique and self.multicast: |
|
822 | 822 | self.writeShort(record.clazz | _CLASS_UNIQUE) |
|
823 | 823 | else: |
|
824 | 824 | self.writeShort(record.clazz) |
|
825 | 825 | if now == 0: |
|
826 | 826 | self.writeInt(record.ttl) |
|
827 | 827 | else: |
|
828 | 828 | self.writeInt(record.getRemainingTTL(now)) |
|
829 | 829 | index = len(self.data) |
|
830 | 830 | # Adjust size for the short we will write before this record |
|
831 | 831 | # |
|
832 | 832 | self.size += 2 |
|
833 | 833 | record.write(self) |
|
834 | 834 | self.size -= 2 |
|
835 | 835 | |
|
836 | 836 | length = len(b''.join(self.data[index:])) |
|
837 | 837 | self.insertShort(index, length) # Here is the short we adjusted for |
|
838 | 838 | |
|
839 | 839 | def packet(self): |
|
840 | 840 | """Returns a string containing the packet's bytes |
|
841 | 841 | |
|
842 | 842 | No further parts should be added to the packet once this |
|
843 | 843 | is done.""" |
|
844 | 844 | if not self.finished: |
|
845 | 845 | self.finished = 1 |
|
846 | 846 | for question in self.questions: |
|
847 | 847 | self.writeQuestion(question) |
|
848 | 848 | for answer, time_ in self.answers: |
|
849 | 849 | self.writeRecord(answer, time_) |
|
850 | 850 | for authority in self.authorities: |
|
851 | 851 | self.writeRecord(authority, 0) |
|
852 | 852 | for additional in self.additionals: |
|
853 | 853 | self.writeRecord(additional, 0) |
|
854 | 854 | |
|
855 | 855 | self.insertShort(0, len(self.additionals)) |
|
856 | 856 | self.insertShort(0, len(self.authorities)) |
|
857 | 857 | self.insertShort(0, len(self.answers)) |
|
858 | 858 | self.insertShort(0, len(self.questions)) |
|
859 | 859 | self.insertShort(0, self.flags) |
|
860 | 860 | if self.multicast: |
|
861 | 861 | self.insertShort(0, 0) |
|
862 | 862 | else: |
|
863 | 863 | self.insertShort(0, self.id) |
|
864 | 864 | return b''.join(self.data) |
|
865 | 865 | |
|
866 | 866 | |
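
Because packet() builds the 12-byte header by repeatedly calling insertShort(0, ...), the header words end up in the reverse order of those calls, giving the standard RFC 1035 layout: id, flags, qdcount, ancount, nscount, arcount. For a one-question multicast query, the prepended header is equivalent to this sketch:

    # Sketch: header packet() effectively prepends for one question,
    # no answers/authorities/additionals, multicast (id forced to 0).
    header = struct.pack(b'!HHHHHH', 0, _FLAGS_QR_QUERY, 1, 0, 0, 0)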
|
867 | 867 | class DNSCache: |
|
868 | 868 | """A cache of DNS entries""" |
|
869 | 869 | |
|
870 | 870 | def __init__(self): |
|
871 | 871 | self.cache = {} |
|
872 | 872 | |
|
873 | 873 | def add(self, entry): |
|
874 | 874 | """Adds an entry""" |
|
875 | 875 | try: |
|
876 | 876 | list = self.cache[entry.key] |
|
877 | 877 | except KeyError: |
|
878 | 878 | list = self.cache[entry.key] = [] |
|
879 | 879 | list.append(entry) |
|
880 | 880 | |
|
881 | 881 | def remove(self, entry): |
|
882 | 882 | """Removes an entry""" |
|
883 | 883 | try: |
|
884 | 884 | list = self.cache[entry.key] |
|
885 | 885 | list.remove(entry) |
|
886 | 886 | except KeyError: |
|
887 | 887 | pass |
|
888 | 888 | |
|
889 | 889 | def get(self, entry): |
|
890 | 890 | """Gets an entry by key. Will return None if there is no |
|
891 | 891 | matching entry.""" |
|
892 | 892 | try: |
|
893 | 893 | list = self.cache[entry.key] |
|
894 | 894 | return list[list.index(entry)] |
|
895 | 895 | except (KeyError, ValueError): |
|
896 | 896 | return None |
|
897 | 897 | |
|
898 | 898 | def getByDetails(self, name, type, clazz): |
|
899 | 899 | """Gets an entry by details. Will return None if there is |
|
900 | 900 | no matching entry.""" |
|
901 | 901 | entry = DNSEntry(name, type, clazz) |
|
902 | 902 | return self.get(entry) |
|
903 | 903 | |
|
904 | 904 | def entriesWithName(self, name): |
|
905 | 905 | """Returns a list of entries whose key matches the name.""" |
|
906 | 906 | try: |
|
907 | 907 | return self.cache[name] |
|
908 | 908 | except KeyError: |
|
909 | 909 | return [] |
|
910 | 910 | |
|
911 | 911 | def entries(self): |
|
912 | 912 | """Returns a list of all entries""" |
|
913 | 913 | try: |
|
914 | 914 | return list(itertools.chain.from_iterable(self.cache.values())) |
|
915 | 915 | except Exception: |
|
916 | 916 | return [] |
|
917 | 917 | |
|
918 | 918 | |
|
919 | 919 | class Engine(threading.Thread): |
|
920 | 920 | """An engine wraps read access to sockets, allowing objects that |
|
921 | 921 | need to receive data from sockets to be called back when the |
|
922 | 922 | sockets are ready. |
|
923 | 923 | |
|
924 | 924 | A reader needs a handle_read() method, which is called when the socket |
|
925 | 925 | it is interested in is ready for reading. |
|
926 | 926 | |
|
927 | 927 | Writers are not implemented here, because we only send short |
|
928 | 928 | packets. |
|
929 | 929 | """ |
|
930 | 930 | |
|
931 | 931 | def __init__(self, zeroconf): |
|
932 | 932 | threading.Thread.__init__(self) |
|
933 | 933 | self.zeroconf = zeroconf |
|
934 | 934 | self.readers = {} # maps socket to reader |
|
935 | 935 | self.timeout = 5 |
|
936 | 936 | self.condition = threading.Condition() |
|
937 | 937 | self.start() |
|
938 | 938 | |
|
939 | 939 | def run(self): |
|
940 | 940 | while not globals()[b'_GLOBAL_DONE']: |
|
941 | 941 | rs = self.getReaders() |
|
942 | 942 | if len(rs) == 0: |
|
943 | 943 | # No sockets to manage, but we wait for the timeout |
|
944 | 944 | # or addition of a socket |
|
945 | 945 | # |
|
946 | 946 | self.condition.acquire() |
|
947 | 947 | self.condition.wait(self.timeout) |
|
948 | 948 | self.condition.release() |
|
949 | 949 | else: |
|
950 | 950 | try: |
|
951 | 951 | rr, wr, er = select.select(rs, [], [], self.timeout) |
|
952 | 952 | for sock in rr: |
|
953 | 953 | try: |
|
954 | 954 | self.readers[sock].handle_read() |
|
955 | 955 | except Exception: |
|
956 | 956 | if not globals()[b'_GLOBAL_DONE']: |
|
957 | 957 | traceback.print_exc() |
|
958 | 958 | except Exception: |
|
959 | 959 | pass |
|
960 | 960 | |
|
961 | 961 | def getReaders(self): |
|
962 | 962 | self.condition.acquire() |
|
963 | 963 | result = self.readers.keys() |
|
964 | 964 | self.condition.release() |
|
965 | 965 | return result |
|
966 | 966 | |
|
967 | 967 | def addReader(self, reader, socket): |
|
968 | 968 | self.condition.acquire() |
|
969 | 969 | self.readers[socket] = reader |
|
970 | 970 | self.condition.notify() |
|
971 | 971 | self.condition.release() |
|
972 | 972 | |
|
973 | 973 | def delReader(self, socket): |
|
974 | 974 | self.condition.acquire() |
|
975 | 975 | del self.readers[socket] |
|
976 | 976 | self.condition.notify() |
|
977 | 977 | self.condition.release() |
|
978 | 978 | |
|
979 | 979 | def notify(self): |
|
980 | 980 | self.condition.acquire() |
|
981 | 981 | self.condition.notify() |
|
982 | 982 | self.condition.release() |
|
983 | 983 | |
|
984 | 984 | |
|
985 | 985 | class Listener: |
|
986 | 986 | """A Listener is used by this module to listen on the multicast |
|
987 | 987 | group to which DNS messages are sent, allowing the implementation |
|
988 | 988 | to cache information as it arrives. |
|
989 | 989 | |
|
990 | 990 | It requires registration with an Engine object in order to have |
|
991 | 991 | the read() method called when a socket is available for reading.""" |
|
992 | 992 | |
|
993 | 993 | def __init__(self, zeroconf): |
|
994 | 994 | self.zeroconf = zeroconf |
|
995 | 995 | self.zeroconf.engine.addReader(self, self.zeroconf.socket) |
|
996 | 996 | |
|
997 | 997 | def handle_read(self): |
|
998 | 998 | sock = self.zeroconf.socket |
|
999 | 999 | try: |
|
1000 | 1000 | data, (addr, port) = sock.recvfrom(_MAX_MSG_ABSOLUTE) |
|
1001 | 1001 | except socket.error as e: |
|
1002 | 1002 | if e.errno == errno.EBADF: |
|
1003 | 1003 | # some other thread may close the socket |
|
1004 | 1004 | return |
|
1005 | 1005 | else: |
|
1006 | 1006 | raise |
|
1007 | 1007 | self.data = data |
|
1008 | 1008 | msg = DNSIncoming(data) |
|
1009 | 1009 | if msg.isQuery(): |
|
1010 | 1010 | # Always multicast responses |
|
1011 | 1011 | # |
|
1012 | 1012 | if port == _MDNS_PORT: |
|
1013 | 1013 | self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) |
|
1014 | 1014 | # If it's not a multicast query, reply via unicast |
|
1015 | 1015 | # and multicast |
|
1016 | 1016 | # |
|
1017 | 1017 | elif port == _DNS_PORT: |
|
1018 | 1018 | self.zeroconf.handleQuery(msg, addr, port) |
|
1019 | 1019 | self.zeroconf.handleQuery(msg, _MDNS_ADDR, _MDNS_PORT) |
|
1020 | 1020 | else: |
|
1021 | 1021 | self.zeroconf.handleResponse(msg) |
|
1022 | 1022 | |
|
1023 | 1023 | |
|
1024 | 1024 | class Reaper(threading.Thread): |
|
1025 | 1025 | """A Reaper is used by this module to remove cache entries that |
|
1026 | 1026 | have expired.""" |
|
1027 | 1027 | |
|
1028 | 1028 | def __init__(self, zeroconf): |
|
1029 | 1029 | threading.Thread.__init__(self) |
|
1030 | 1030 | self.zeroconf = zeroconf |
|
1031 | 1031 | self.start() |
|
1032 | 1032 | |
|
1033 | 1033 | def run(self): |
|
1034 | 1034 | while True: |
|
1035 | 1035 | self.zeroconf.wait(10 * 1000) |
|
1036 | 1036 | if globals()[b'_GLOBAL_DONE']: |
|
1037 | 1037 | return |
|
1038 | 1038 | now = currentTimeMillis() |
|
1039 | 1039 | for record in self.zeroconf.cache.entries(): |
|
1040 | 1040 | if record.isExpired(now): |
|
1041 | 1041 | self.zeroconf.updateRecord(now, record) |
|
1042 | 1042 | self.zeroconf.cache.remove(record) |
|
1043 | 1043 | |
|
1044 | 1044 | |
|
1045 | 1045 | class ServiceBrowser(threading.Thread): |
|
1046 | 1046 | """Used to browse for a service of a specific type. |
|
1047 | 1047 | |
|
1048 | 1048 | The listener object will have its addService() and |
|
1049 | 1049 | removeService() methods called when this browser |
|
1050 | 1050 | discovers changes in the services availability.""" |
|
1051 | 1051 | |
|
1052 | 1052 | def __init__(self, zeroconf, type, listener): |
|
1053 | 1053 | """Creates a browser for a specific type""" |
|
1054 | 1054 | threading.Thread.__init__(self) |
|
1055 | 1055 | self.zeroconf = zeroconf |
|
1056 | 1056 | self.type = type |
|
1057 | 1057 | self.listener = listener |
|
1058 | 1058 | self.services = {} |
|
1059 | 1059 | self.nexttime = currentTimeMillis() |
|
1060 | 1060 | self.delay = _BROWSER_TIME |
|
1061 | 1061 | self.list = [] |
|
1062 | 1062 | |
|
1063 | 1063 | self.done = 0 |
|
1064 | 1064 | |
|
1065 | 1065 | self.zeroconf.addListener( |
|
1066 | 1066 | self, DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN) |
|
1067 | 1067 | ) |
|
1068 | 1068 | self.start() |
|
1069 | 1069 | |
|
1070 | 1070 | def updateRecord(self, zeroconf, now, record): |
|
1071 | 1071 | """Callback invoked by Zeroconf when new information arrives. |
|
1072 | 1072 | |
|
1073 | 1073 | Updates information required by browser in the Zeroconf cache.""" |
|
1074 | 1074 | if record.type == _TYPE_PTR and record.name == self.type: |
|
1075 | 1075 | expired = record.isExpired(now) |
|
1076 | 1076 | try: |
|
1077 | 1077 | oldrecord = self.services[record.alias.lower()] |
|
1078 | 1078 | if not expired: |
|
1079 | 1079 | oldrecord.resetTTL(record) |
|
1080 | 1080 | else: |
|
1081 | 1081 | del self.services[record.alias.lower()] |
|
1082 | 1082 | callback = lambda x: self.listener.removeService( |
|
1083 | 1083 | x, self.type, record.alias |
|
1084 | 1084 | ) |
|
1085 | 1085 | self.list.append(callback) |
|
1086 | 1086 | return |
|
1087 | 1087 | except Exception: |
|
1088 | 1088 | if not expired: |
|
1089 | 1089 | self.services[record.alias.lower()] = record |
|
1090 | 1090 | callback = lambda x: self.listener.addService( |
|
1091 | 1091 | x, self.type, record.alias |
|
1092 | 1092 | ) |
|
1093 | 1093 | self.list.append(callback) |
|
1094 | 1094 | |
|
1095 | 1095 | expires = record.getExpirationTime(75) |
|
1096 | 1096 | if expires < self.nexttime: |
|
1097 | 1097 | self.nexttime = expires |
|
1098 | 1098 | |
|
1099 | 1099 | def cancel(self): |
|
1100 | 1100 | self.done = 1 |
|
1101 | 1101 | self.zeroconf.notifyAll() |
|
1102 | 1102 | |
|
1103 | 1103 | def run(self): |
|
1104 | 1104 | while True: |
|
1105 | 1105 | event = None |
|
1106 | 1106 | now = currentTimeMillis() |
|
1107 | 1107 | if len(self.list) == 0 and self.nexttime > now: |
|
1108 | 1108 | self.zeroconf.wait(self.nexttime - now) |
|
1109 | 1109 | if globals()[b'_GLOBAL_DONE'] or self.done: |
|
1110 | 1110 | return |
|
1111 | 1111 | now = currentTimeMillis() |
|
1112 | 1112 | |
|
1113 | 1113 | if self.nexttime <= now: |
|
1114 | 1114 | out = DNSOutgoing(_FLAGS_QR_QUERY) |
|
1115 | 1115 | out.addQuestion(DNSQuestion(self.type, _TYPE_PTR, _CLASS_IN)) |
|
1116 | 1116 | for record in self.services.values(): |
|
1117 | 1117 | if not record.isExpired(now): |
|
1118 | 1118 | out.addAnswerAtTime(record, now) |
|
1119 | 1119 | self.zeroconf.send(out) |
|
1120 | 1120 | self.nexttime = now + self.delay |
|
1121 | 1121 | self.delay = min(20 * 1000, self.delay * 2) |
|
1122 | 1122 | |
|
1123 | 1123 | if len(self.list) > 0: |
|
1124 | 1124 | event = self.list.pop(0) |
|
1125 | 1125 | |
|
1126 | 1126 | if event is not None: |
|
1127 | 1127 | event(self.zeroconf) |
|
1128 | 1128 | |
|
1129 | 1129 | |
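
The run() loop above resends its PTR query on a doubling schedule: the first delay is _BROWSER_TIME (500 ms) and each send doubles it, capped at 20 seconds. A quick sketch of the resulting gaps between queries:

    # Sketch of the back-off computed in ServiceBrowser.run().
    delay, gaps = _BROWSER_TIME, []
    for _ in range(8):
        gaps.append(delay)
        delay = min(20 * 1000, delay * 2)
    # gaps == [500, 1000, 2000, 4000, 8000, 16000, 20000, 20000]  (milliseconds)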
|
1130 | 1130 | class ServiceInfo: |
|
1131 | 1131 | """Service information""" |
|
1132 | 1132 | |
|
1133 | 1133 | def __init__( |
|
1134 | 1134 | self, |
|
1135 | 1135 | type, |
|
1136 | 1136 | name, |
|
1137 | 1137 | address=None, |
|
1138 | 1138 | port=None, |
|
1139 | 1139 | weight=0, |
|
1140 | 1140 | priority=0, |
|
1141 | 1141 | properties=None, |
|
1142 | 1142 | server=None, |
|
1143 | 1143 | ): |
|
1144 | 1144 | """Create a service description. |
|
1145 | 1145 | |
|
1146 | 1146 | type: fully qualified service type name |
|
1147 | 1147 | name: fully qualified service name |
|
1148 | 1148 | address: IP address as unsigned short, network byte order |
|
1149 | 1149 | port: port that the service runs on |
|
1150 | 1150 | weight: weight of the service |
|
1151 | 1151 | priority: priority of the service |
|
1152 | 1152 | properties: dictionary of properties (or a string holding the bytes for |
|
1153 | 1153 | the text field) |
|
1154 | 1154 | server: fully qualified name for service host (defaults to name)""" |
|
1155 | 1155 | |
|
1156 | 1156 | if not name.endswith(type): |
|
1157 | 1157 | raise BadTypeInNameException |
|
1158 | 1158 | self.type = type |
|
1159 | 1159 | self.name = name |
|
1160 | 1160 | self.address = address |
|
1161 | 1161 | self.port = port |
|
1162 | 1162 | self.weight = weight |
|
1163 | 1163 | self.priority = priority |
|
1164 | 1164 | if server: |
|
1165 | 1165 | self.server = server |
|
1166 | 1166 | else: |
|
1167 | 1167 | self.server = name |
|
1168 | 1168 | self.setProperties(properties) |
|
1169 | 1169 | |
|
1170 | 1170 | def setProperties(self, properties): |
|
1171 | 1171 | """Sets properties and text of this info from a dictionary""" |
|
1172 | 1172 | if isinstance(properties, dict): |
|
1173 | 1173 | self.properties = properties |
|
1174 | 1174 | list = [] |
|
1175 | 1175 | result = b'' |
|
1176 | 1176 | for key in properties: |
|
1177 | 1177 | value = properties[key] |
|
1178 | 1178 | if value is None: |
|
1179 | 1179 | suffix = b'' |
|
1180 | 1180 | elif isinstance(value, str): |
|
1181 | 1181 | suffix = value |
|
1182 | 1182 | elif isinstance(value, int): |
|
1183 | 1183 | if value: |
|
1184 | 1184 | suffix = b'true' |
|
1185 | 1185 | else: |
|
1186 | 1186 | suffix = b'false' |
|
1187 | 1187 | else: |
|
1188 | 1188 | suffix = b'' |
|
1189 | 1189 | list.append(b'='.join((key, suffix))) |
|
1190 | 1190 | for item in list: |
|
1191 | 1191 | result = b''.join( |
|
1192 | 1192 | ( |
|
1193 | 1193 | result, |
|
1194 | 1194 | struct.pack(b'!c', pycompat.bytechr(len(item))), |
|
1195 | 1195 | item, |
|
1196 | 1196 | ) |
|
1197 | 1197 | ) |
|
1198 | 1198 | self.text = result |
|
1199 | 1199 | else: |
|
1200 | 1200 | self.text = properties |
|
1201 | 1201 | |
|
1202 | 1202 | def setText(self, text): |
|
1203 | 1203 | """Sets properties and text given a text field""" |
|
1204 | 1204 | self.text = text |
|
1205 | 1205 | try: |
|
1206 | 1206 | result = {} |
|
1207 | 1207 | end = len(text) |
|
1208 | 1208 | index = 0 |
|
1209 | 1209 | strs = [] |
|
1210 | 1210 | while index < end: |
|
1211 | 1211 | length = ord(text[index]) |
|
1212 | 1212 | index += 1 |
|
1213 | 1213 | strs.append(text[index : index + length]) |
|
1214 | 1214 | index += length |
|
1215 | 1215 | |
|
1216 | 1216 | for s in strs: |
|
1217 | 1217 | eindex = s.find(b'=') |
|
1218 | 1218 | if eindex == -1: |
|
1219 | 1219 | # No equals sign at all |
|
1220 | 1220 | key = s |
|
1221 | 1221 | value = 0 |
|
1222 | 1222 | else: |
|
1223 | 1223 | key = s[:eindex] |
|
1224 | 1224 | value = s[eindex + 1 :] |
|
1225 | 1225 | if value == b'true': |
|
1226 | 1226 | value = 1 |
|
1227 | 1227 | elif value == b'false' or not value: |
|
1228 | 1228 | value = 0 |
|
1229 | 1229 | |
|
1230 | 1230 | # Only update non-existent properties |
|
1231 | 1231 | if key and result.get(key) is None: |
|
1232 | 1232 | result[key] = value |
|
1233 | 1233 | |
|
1234 | 1234 | self.properties = result |
|
1235 | 1235 | except Exception: |
|
1236 | 1236 | traceback.print_exc() |
|
1237 | 1237 | self.properties = None |
|
1238 | 1238 | |
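
setProperties() and setText() above use the mDNS TXT convention: each key=value pair is framed as a single length byte followed by the bytes of 'key=value'. A worked example with a hypothetical pair, using the module's own helpers:

    # Hypothetical TXT item: one length byte, then the payload.
    item = b'path=/repo'                                  # 10 bytes
    framed = struct.pack(b'!c', pycompat.bytechr(len(item))) + item
    # framed == b'\x0apath=/repo' -- the framing setText() walks when parsing.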
|
1239 | 1239 | def getType(self): |
|
1240 | 1240 | """Type accessor""" |
|
1241 | 1241 | return self.type |
|
1242 | 1242 | |
|
1243 | 1243 | def getName(self): |
|
1244 | 1244 | """Name accessor""" |
|
1245 | 1245 | if self.type is not None and self.name.endswith(b"." + self.type): |
|
1246 | 1246 | return self.name[: len(self.name) - len(self.type) - 1] |
|
1247 | 1247 | return self.name |
|
1248 | 1248 | |
|
1249 | 1249 | def getAddress(self): |
|
1250 | 1250 | """Address accessor""" |
|
1251 | 1251 | return self.address |
|
1252 | 1252 | |
|
1253 | 1253 | def getPort(self): |
|
1254 | 1254 | """Port accessor""" |
|
1255 | 1255 | return self.port |
|
1256 | 1256 | |
|
1257 | 1257 | def getPriority(self): |
|
1258 | 1258 | """Priority accessor""" |
|
1259 | 1259 | return self.priority |
|
1260 | 1260 | |
|
1261 | 1261 | def getWeight(self): |
|
1262 | 1262 | """Weight accessor""" |
|
1263 | 1263 | return self.weight |
|
1264 | 1264 | |
|
1265 | 1265 | def getProperties(self): |
|
1266 | 1266 | """Properties accessor""" |
|
1267 | 1267 | return self.properties |
|
1268 | 1268 | |
|
1269 | 1269 | def getText(self): |
|
1270 | 1270 | """Text accessor""" |
|
1271 | 1271 | return self.text |
|
1272 | 1272 | |
|
1273 | 1273 | def getServer(self): |
|
1274 | 1274 | """Server accessor""" |
|
1275 | 1275 | return self.server |
|
1276 | 1276 | |
|
1277 | 1277 | def updateRecord(self, zeroconf, now, record): |
|
1278 | 1278 | """Updates service information from a DNS record""" |
|
1279 | 1279 | if record is not None and not record.isExpired(now): |
|
1280 | 1280 | if record.type == _TYPE_A: |
|
1281 | 1281 | # if record.name == self.name: |
|
1282 | 1282 | if record.name == self.server: |
|
1283 | 1283 | self.address = record.address |
|
1284 | 1284 | elif record.type == _TYPE_SRV: |
|
1285 | 1285 | if record.name == self.name: |
|
1286 | 1286 | self.server = record.server |
|
1287 | 1287 | self.port = record.port |
|
1288 | 1288 | self.weight = record.weight |
|
1289 | 1289 | self.priority = record.priority |
|
1290 | 1290 | # self.address = None |
|
1291 | 1291 | self.updateRecord( |
|
1292 | 1292 | zeroconf, |
|
1293 | 1293 | now, |
|
1294 | 1294 | zeroconf.cache.getByDetails( |
|
1295 | 1295 | self.server, _TYPE_A, _CLASS_IN |
|
1296 | 1296 | ), |
|
1297 | 1297 | ) |
|
1298 | 1298 | elif record.type == _TYPE_TXT: |
|
1299 | 1299 | if record.name == self.name: |
|
1300 | 1300 | self.setText(record.text) |
|
1301 | 1301 | |
|
1302 | 1302 | def request(self, zeroconf, timeout): |
|
1303 | 1303 | """Returns true if the service could be discovered on the |
|
1304 | 1304 | network, and updates this object with details discovered. |
|
1305 | 1305 | """ |
|
1306 | 1306 | now = currentTimeMillis() |
|
1307 | 1307 | delay = _LISTENER_TIME |
|
1308 | 1308 | next = now + delay |
|
1309 | 1309 | last = now + timeout |
|
1310 | 1310 | try: |
|
1311 | 1311 | zeroconf.addListener( |
|
1312 | 1312 | self, DNSQuestion(self.name, _TYPE_ANY, _CLASS_IN) |
|
1313 | 1313 | ) |
|
1314 | 1314 | while ( |
|
1315 | 1315 | self.server is None or self.address is None or self.text is None |
|
1316 | 1316 | ): |
|
1317 | 1317 | if last <= now: |
|
1318 | 1318 | return 0 |
|
1319 | 1319 | if next <= now: |
|
1320 | 1320 | out = DNSOutgoing(_FLAGS_QR_QUERY) |
|
1321 | 1321 | out.addQuestion( |
|
1322 | 1322 | DNSQuestion(self.name, _TYPE_SRV, _CLASS_IN) |
|
1323 | 1323 | ) |
|
1324 | 1324 | out.addAnswerAtTime( |
|
1325 | 1325 | zeroconf.cache.getByDetails( |
|
1326 | 1326 | self.name, _TYPE_SRV, _CLASS_IN |
|
1327 | 1327 | ), |
|
1328 | 1328 | now, |
|
1329 | 1329 | ) |
|
1330 | 1330 | out.addQuestion( |
|
1331 | 1331 | DNSQuestion(self.name, _TYPE_TXT, _CLASS_IN) |
|
1332 | 1332 | ) |
|
1333 | 1333 | out.addAnswerAtTime( |
|
1334 | 1334 | zeroconf.cache.getByDetails( |
|
1335 | 1335 | self.name, _TYPE_TXT, _CLASS_IN |
|
1336 | 1336 | ), |
|
1337 | 1337 | now, |
|
1338 | 1338 | ) |
|
1339 | 1339 | if self.server is not None: |
|
1340 | 1340 | out.addQuestion( |
|
1341 | 1341 | DNSQuestion(self.server, _TYPE_A, _CLASS_IN) |
|
1342 | 1342 | ) |
|
1343 | 1343 | out.addAnswerAtTime( |
|
1344 | 1344 | zeroconf.cache.getByDetails( |
|
1345 | 1345 | self.server, _TYPE_A, _CLASS_IN |
|
1346 | 1346 | ), |
|
1347 | 1347 | now, |
|
1348 | 1348 | ) |
|
1349 | 1349 | zeroconf.send(out) |
|
1350 | 1350 | next = now + delay |
|
1351 | 1351 | delay = delay * 2 |
|
1352 | 1352 | |
|
1353 | 1353 | zeroconf.wait(min(next, last) - now) |
|
1354 | 1354 | now = currentTimeMillis() |
|
1355 | 1355 | result = 1 |
|
1356 | 1356 | finally: |
|
1357 | 1357 | zeroconf.removeListener(self) |
|
1358 | 1358 | |
|
1359 | 1359 | return result |
|
1360 | 1360 | |
|
1361 | 1361 | def __eq__(self, other): |
|
1362 | 1362 | """Tests equality of service name""" |
|
1363 | 1363 | if isinstance(other, ServiceInfo): |
|
1364 | 1364 | return other.name == self.name |
|
1365 | 1365 | return 0 |
|
1366 | 1366 | |
|
1367 | 1367 | def __ne__(self, other): |
|
1368 | 1368 | """Non-equality test""" |
|
1369 | 1369 | return not self.__eq__(other) |
|
1370 | 1370 | |
|
1371 | 1371 | def __repr__(self): |
|
1372 | 1372 | """String representation""" |
|
1373 | 1373 | result = b"service[%s,%s:%s," % ( |
|
1374 | 1374 | self.name, |
|
1375 | 1375 | socket.inet_ntoa(self.getAddress()), |
|
1376 | 1376 | self.port, |
|
1377 | 1377 | ) |
|
1378 | 1378 | if self.text is None: |
|
1379 | 1379 | result += b"None" |
|
1380 | 1380 | else: |
|
1381 | 1381 | if len(self.text) < 20: |
|
1382 | 1382 | result += self.text |
|
1383 | 1383 | else: |
|
1384 | 1384 | result += self.text[:17] + b"..." |
|
1385 | 1385 | result += b"]" |
|
1386 | 1386 | return result |
|
1387 | 1387 | |
|
1388 | 1388 | |
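The accessors above simply expose whatever a successful request() filled in. A minimal sketch of how they might be used together with the Zeroconf class defined below; the service type and instance name are invented, and getAddress() returns the packed four-byte form that socket.inet_ntoa() expects:

    # illustrative only: "_example._tcp.local." and the instance name are hypothetical
    zc = Zeroconf()
    try:
        info = zc.getServiceInfo(b"_example._tcp.local.",
                                 b"box._example._tcp.local.")
        if info is not None:
            print(socket.inet_ntoa(info.getAddress()), info.getPort())
            print(info.getProperties())
    finally:
        zc.close()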
|
1389 | 1389 | class Zeroconf: |
|
1390 | 1390 | """Implementation of Zeroconf Multicast DNS Service Discovery |
|
1391 | 1391 | |
|
1392 | 1392 | Supports registration, unregistration, queries and browsing. |
|
1393 | 1393 | """ |
|
1394 | 1394 | |
|
1395 | 1395 | def __init__(self, bindaddress=None): |
|
1396 | 1396 | """Creates an instance of the Zeroconf class, establishing |
|
1397 | 1397 | multicast communications and starting the listening and reaping threads."""
|
1398 | 1398 | globals()[b'_GLOBAL_DONE'] = 0 |
|
1399 | 1399 | if bindaddress is None: |
|
1400 | 1400 | self.intf = socket.gethostbyname(socket.gethostname()) |
|
1401 | 1401 | else: |
|
1402 | 1402 | self.intf = bindaddress |
|
1403 | 1403 | self.group = (b'', _MDNS_PORT) |
|
1404 | 1404 | self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) |
|
1405 | 1405 | try: |
|
1406 | 1406 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) |
|
1407 | 1407 | self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) |
|
1408 | 1408 | except Exception: |
|
1409 | 1409 | # SO_REUSEADDR should be equivalent to SO_REUSEPORT for |
|
1410 | 1410 | # multicast UDP sockets (p 731, "TCP/IP Illustrated, |
|
1411 | 1411 | # Volume 2"), but some BSD-derived systems require |
|
1412 | 1412 | # SO_REUSEPORT to be specified explicitly. Also, not all |
|
1413 | 1413 | # versions of Python have SO_REUSEPORT available. So |
|
1414 | 1414 | # if you're on a BSD-based system, and haven't upgraded |
|
1415 | 1415 | # to Python 2.3 yet, you may find this library doesn't |
|
1416 | 1416 | # work as expected. |
|
1417 | 1417 | # |
|
1418 | 1418 | pass |
|
1419 | 1419 | self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, b"\xff") |
|
1420 | 1420 | self.socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_LOOP, b"\x01") |
|
1421 | 1421 | try: |
|
1422 | 1422 | self.socket.bind(self.group) |
|
1423 | 1423 | except Exception: |
|
1424 | 1424 | # Some versions of Linux raise an exception even though
|
1425 | 1425 | # SO_REUSEADDR and SO_REUSEPORT have been set, so ignore it |
|
1426 | 1426 | pass |
|
1427 | 1427 | self.socket.setsockopt( |
|
1428 | 1428 | socket.SOL_IP, |
|
1429 | 1429 | socket.IP_ADD_MEMBERSHIP, |
|
1430 | 1430 | socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'), |
|
1431 | 1431 | ) |
|
1432 | 1432 | |
|
1433 | 1433 | self.listeners = [] |
|
1434 | 1434 | self.browsers = [] |
|
1435 | 1435 | self.services = {} |
|
1436 | 1436 | self.servicetypes = {} |
|
1437 | 1437 | |
|
1438 | 1438 | self.cache = DNSCache() |
|
1439 | 1439 | |
|
1440 | 1440 | self.condition = threading.Condition() |
|
1441 | 1441 | |
|
1442 | 1442 | self.engine = Engine(self) |
|
1443 | 1443 | self.listener = Listener(self) |
|
1444 | 1444 | self.reaper = Reaper(self) |
|
1445 | 1445 | |
|
1446 | 1446 | def isLoopback(self): |
|
1447 | 1447 | return self.intf.startswith(b"127.0.0.1") |
|
1448 | 1448 | |
|
1449 | 1449 | def isLinklocal(self): |
|
1450 | 1450 | return self.intf.startswith(b"169.254.") |
|
1451 | 1451 | |
|
1452 | 1452 | def wait(self, timeout): |
|
1453 | 1453 | """Calling thread waits for a given number of milliseconds or |
|
1454 | 1454 | until notified.""" |
|
1455 | 1455 | self.condition.acquire() |
|
1456 | 1456 | self.condition.wait(timeout / 1000) |
|
1457 | 1457 | self.condition.release() |
|
1458 | 1458 | |
|
1459 | 1459 | def notifyAll(self): |
|
1460 | 1460 | """Notifies all waiting threads""" |
|
1461 | 1461 | self.condition.acquire() |
|
1462 | 1462 | self.condition.notify_all() |
|
1463 | 1463 | self.condition.release() |
|
1464 | 1464 | |
|
1465 | 1465 | def getServiceInfo(self, type, name, timeout=3000): |
|
1466 | 1466 | """Returns the network's service information for a particular
|
1467 | 1467 | name and type, or None if no service matches by the timeout, |
|
1468 | 1468 | which defaults to 3 seconds.""" |
|
1469 | 1469 | info = ServiceInfo(type, name) |
|
1470 | 1470 | if info.request(self, timeout): |
|
1471 | 1471 | return info |
|
1472 | 1472 | return None |
|
1473 | 1473 | |
|
1474 | 1474 | def addServiceListener(self, type, listener): |
|
1475 | 1475 | """Adds a listener for a particular service type. This object |
|
1476 | 1476 | will then have its updateRecord method called when information |
|
1477 | 1477 | arrives for that type.""" |
|
1478 | 1478 | self.removeServiceListener(listener) |
|
1479 | 1479 | self.browsers.append(ServiceBrowser(self, type, listener)) |
|
1480 | 1480 | |
|
1481 | 1481 | def removeServiceListener(self, listener): |
|
1482 | 1482 | """Removes a listener from the set that is currently listening.""" |
|
1483 | 1483 | for browser in self.browsers: |
|
1484 | 1484 | if browser.listener == listener: |
|
1485 | 1485 | browser.cancel() |
|
1486 | 1486 | del browser |
|
1487 | 1487 | |
|
1488 | 1488 | def registerService(self, info, ttl=_DNS_TTL): |
|
1489 | 1489 | """Registers service information to the network with a default TTL |
|
1490 | 1490 | of 60 seconds. Zeroconf will then respond to requests for |
|
1491 | 1491 | information for that service. The name of the service may be |
|
1492 | 1492 | changed if needed to make it unique on the network.""" |
|
1493 | 1493 | self.checkService(info) |
|
1494 | 1494 | self.services[info.name.lower()] = info |
|
1495 | 1495 | if info.type in self.servicetypes: |
|
1496 | 1496 | self.servicetypes[info.type] += 1 |
|
1497 | 1497 | else: |
|
1498 | 1498 | self.servicetypes[info.type] = 1 |
|
1499 | 1499 | now = currentTimeMillis() |
|
1500 | 1500 | nexttime = now |
|
1501 | 1501 | i = 0 |
|
1502 | 1502 | while i < 3: |
|
1503 | 1503 | if now < nexttime: |
|
1504 | 1504 | self.wait(nexttime - now) |
|
1505 | 1505 | now = currentTimeMillis() |
|
1506 | 1506 | continue |
|
1507 | 1507 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1508 | 1508 | out.addAnswerAtTime( |
|
1509 | 1509 | DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, ttl, info.name), 0 |
|
1510 | 1510 | ) |
|
1511 | 1511 | out.addAnswerAtTime( |
|
1512 | 1512 | DNSService( |
|
1513 | 1513 | info.name, |
|
1514 | 1514 | _TYPE_SRV, |
|
1515 | 1515 | _CLASS_IN, |
|
1516 | 1516 | ttl, |
|
1517 | 1517 | info.priority, |
|
1518 | 1518 | info.weight, |
|
1519 | 1519 | info.port, |
|
1520 | 1520 | info.server, |
|
1521 | 1521 | ), |
|
1522 | 1522 | 0, |
|
1523 | 1523 | ) |
|
1524 | 1524 | out.addAnswerAtTime( |
|
1525 | 1525 | DNSText(info.name, _TYPE_TXT, _CLASS_IN, ttl, info.text), 0 |
|
1526 | 1526 | ) |
|
1527 | 1527 | if info.address: |
|
1528 | 1528 | out.addAnswerAtTime( |
|
1529 | 1529 | DNSAddress( |
|
1530 | 1530 | info.server, _TYPE_A, _CLASS_IN, ttl, info.address |
|
1531 | 1531 | ), |
|
1532 | 1532 | 0, |
|
1533 | 1533 | ) |
|
1534 | 1534 | self.send(out) |
|
1535 | 1535 | i += 1 |
|
1536 | 1536 | nexttime += _REGISTER_TIME |
|
1537 | 1537 | |
|
1538 | 1538 | def unregisterService(self, info): |
|
1539 | 1539 | """Unregister a service.""" |
|
1540 | 1540 | try: |
|
1541 | 1541 | del self.services[info.name.lower()] |
|
1542 | 1542 | if self.servicetypes[info.type] > 1: |
|
1543 | 1543 | self.servicetypes[info.type] -= 1 |
|
1544 | 1544 | else: |
|
1545 | 1545 | del self.servicetypes[info.type] |
|
1546 | 1546 | except KeyError: |
|
1547 | 1547 | pass |
|
1548 | 1548 | now = currentTimeMillis() |
|
1549 | 1549 | nexttime = now |
|
1550 | 1550 | i = 0 |
|
1551 | 1551 | while i < 3: |
|
1552 | 1552 | if now < nexttime: |
|
1553 | 1553 | self.wait(nexttime - now) |
|
1554 | 1554 | now = currentTimeMillis() |
|
1555 | 1555 | continue |
|
1556 | 1556 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1557 | 1557 | out.addAnswerAtTime( |
|
1558 | 1558 | DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, 0, info.name), 0 |
|
1559 | 1559 | ) |
|
1560 | 1560 | out.addAnswerAtTime( |
|
1561 | 1561 | DNSService( |
|
1562 | 1562 | info.name, |
|
1563 | 1563 | _TYPE_SRV, |
|
1564 | 1564 | _CLASS_IN, |
|
1565 | 1565 | 0, |
|
1566 | 1566 | info.priority, |
|
1567 | 1567 | info.weight, |
|
1568 | 1568 | info.port, |
|
1569 | 1569 | info.name, |
|
1570 | 1570 | ), |
|
1571 | 1571 | 0, |
|
1572 | 1572 | ) |
|
1573 | 1573 | out.addAnswerAtTime( |
|
1574 | 1574 | DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), 0 |
|
1575 | 1575 | ) |
|
1576 | 1576 | if info.address: |
|
1577 | 1577 | out.addAnswerAtTime( |
|
1578 | 1578 | DNSAddress( |
|
1579 | 1579 | info.server, _TYPE_A, _CLASS_IN, 0, info.address |
|
1580 | 1580 | ), |
|
1581 | 1581 | 0, |
|
1582 | 1582 | ) |
|
1583 | 1583 | self.send(out) |
|
1584 | 1584 | i += 1 |
|
1585 | 1585 | nexttime += _UNREGISTER_TIME |
|
1586 | 1586 | |
|
1587 | 1587 | def unregisterAllServices(self): |
|
1588 | 1588 | """Unregister all registered services.""" |
|
1589 | 1589 | if len(self.services) > 0: |
|
1590 | 1590 | now = currentTimeMillis() |
|
1591 | 1591 | nexttime = now |
|
1592 | 1592 | i = 0 |
|
1593 | 1593 | while i < 3: |
|
1594 | 1594 | if now < nexttime: |
|
1595 | 1595 | self.wait(nexttime - now) |
|
1596 | 1596 | now = currentTimeMillis() |
|
1597 | 1597 | continue |
|
1598 | 1598 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1599 | 1599 | for info in self.services.values(): |
|
1600 | 1600 | out.addAnswerAtTime( |
|
1601 | 1601 | DNSPointer( |
|
1602 | 1602 | info.type, _TYPE_PTR, _CLASS_IN, 0, info.name |
|
1603 | 1603 | ), |
|
1604 | 1604 | 0, |
|
1605 | 1605 | ) |
|
1606 | 1606 | out.addAnswerAtTime( |
|
1607 | 1607 | DNSService( |
|
1608 | 1608 | info.name, |
|
1609 | 1609 | _TYPE_SRV, |
|
1610 | 1610 | _CLASS_IN, |
|
1611 | 1611 | 0, |
|
1612 | 1612 | info.priority, |
|
1613 | 1613 | info.weight, |
|
1614 | 1614 | info.port, |
|
1615 | 1615 | info.server, |
|
1616 | 1616 | ), |
|
1617 | 1617 | 0, |
|
1618 | 1618 | ) |
|
1619 | 1619 | out.addAnswerAtTime( |
|
1620 | 1620 | DNSText(info.name, _TYPE_TXT, _CLASS_IN, 0, info.text), |
|
1621 | 1621 | 0, |
|
1622 | 1622 | ) |
|
1623 | 1623 | if info.address: |
|
1624 | 1624 | out.addAnswerAtTime( |
|
1625 | 1625 | DNSAddress( |
|
1626 | 1626 | info.server, _TYPE_A, _CLASS_IN, 0, info.address |
|
1627 | 1627 | ), |
|
1628 | 1628 | 0, |
|
1629 | 1629 | ) |
|
1630 | 1630 | self.send(out) |
|
1631 | 1631 | i += 1 |
|
1632 | 1632 | nexttime += _UNREGISTER_TIME |
|
1633 | 1633 | |
|
1634 | 1634 | def checkService(self, info): |
|
1635 | 1635 | """Checks the network for a unique service name, modifying the |
|
1636 | 1636 | ServiceInfo passed in if it is not unique.""" |
|
1637 | 1637 | now = currentTimeMillis() |
|
1638 | 1638 | nexttime = now |
|
1639 | 1639 | i = 0 |
|
1640 | 1640 | while i < 3: |
|
1641 | 1641 | for record in self.cache.entriesWithName(info.type): |
|
1642 | 1642 | if ( |
|
1643 | 1643 | record.type == _TYPE_PTR |
|
1644 | 1644 | and not record.isExpired(now) |
|
1645 | 1645 | and record.alias == info.name |
|
1646 | 1646 | ): |
|
1647 | 1647 | if info.name.find(b'.') < 0: |
|
1648 | 1648 | info.name = b"%s.[%s:%d].%s" % ( |
|
1649 | 1649 | info.name, |
|
1650 | 1650 | info.address, |
|
1651 | 1651 | info.port, |
|
1652 | 1652 | info.type, |
|
1653 | 1653 | ) |
|
1654 | 1654 | self.checkService(info) |
|
1655 | 1655 | return |
|
1656 | 1656 | raise NonUniqueNameException |
|
1657 | 1657 | if now < nexttime: |
|
1658 | 1658 | self.wait(nexttime - now) |
|
1659 | 1659 | now = currentTimeMillis() |
|
1660 | 1660 | continue |
|
1661 | 1661 | out = DNSOutgoing(_FLAGS_QR_QUERY | _FLAGS_AA) |
|
1662 | 1662 | self.debug = out |
|
1663 | 1663 | out.addQuestion(DNSQuestion(info.type, _TYPE_PTR, _CLASS_IN)) |
|
1664 | 1664 | out.addAuthoritativeAnswer( |
|
1665 | 1665 | DNSPointer(info.type, _TYPE_PTR, _CLASS_IN, _DNS_TTL, info.name) |
|
1666 | 1666 | ) |
|
1667 | 1667 | self.send(out) |
|
1668 | 1668 | i += 1 |
|
1669 | 1669 | nexttime += _CHECK_TIME |
|
1670 | 1670 | |
|
1671 | 1671 | def addListener(self, listener, question): |
|
1672 | 1672 | """Adds a listener for a given question. The listener will have |
|
1673 | 1673 | its updateRecord method called when information is available to |
|
1674 | 1674 | answer the question.""" |
|
1675 | 1675 | now = currentTimeMillis() |
|
1676 | 1676 | self.listeners.append(listener) |
|
1677 | 1677 | if question is not None: |
|
1678 | 1678 | for record in self.cache.entriesWithName(question.name): |
|
1679 | 1679 | if question.answeredBy(record) and not record.isExpired(now): |
|
1680 | 1680 | listener.updateRecord(self, now, record) |
|
1681 | 1681 | self.notifyAll() |
|
1682 | 1682 | |
|
1683 | 1683 | def removeListener(self, listener): |
|
1684 | 1684 | """Removes a listener.""" |
|
1685 | 1685 | try: |
|
1686 | 1686 | self.listeners.remove(listener) |
|
1687 | 1687 | self.notifyAll() |
|
1688 | 1688 | except Exception: |
|
1689 | 1689 | pass |
|
1690 | 1690 | |
|
1691 | 1691 | def updateRecord(self, now, rec): |
|
1692 | 1692 | """Used to notify listeners of new information that has updated |
|
1693 | 1693 | a record.""" |
|
1694 | 1694 | for listener in self.listeners: |
|
1695 | 1695 | listener.updateRecord(self, now, rec) |
|
1696 | 1696 | self.notifyAll() |
|
1697 | 1697 | |
|
1698 | 1698 | def handleResponse(self, msg): |
|
1699 | 1699 | """Deal with incoming response packets. All answers |
|
1700 | 1700 | are held in the cache, and listeners are notified.""" |
|
1701 | 1701 | now = currentTimeMillis() |
|
1702 | 1702 | for record in msg.answers: |
|
1703 | 1703 | expired = record.isExpired(now) |
|
1704 | 1704 | if record in self.cache.entries(): |
|
1705 | 1705 | if expired: |
|
1706 | 1706 | self.cache.remove(record) |
|
1707 | 1707 | else: |
|
1708 | 1708 | entry = self.cache.get(record) |
|
1709 | 1709 | if entry is not None: |
|
1710 | 1710 | entry.resetTTL(record) |
|
1711 | 1711 | record = entry |
|
1712 | 1712 | else: |
|
1713 | 1713 | self.cache.add(record) |
|
1714 | 1714 | |
|
1715 | 1715 | self.updateRecord(now, record) |
|
1716 | 1716 | |
|
1717 | 1717 | def handleQuery(self, msg, addr, port): |
|
1718 | 1718 | """Deal with incoming query packets. Provides a response if |
|
1719 | 1719 | possible.""" |
|
1720 | 1720 | out = None |
|
1721 | 1721 | |
|
1722 | 1722 | # Support unicast client responses |
|
1723 | 1723 | # |
|
1724 | 1724 | if port != _MDNS_PORT: |
|
1725 | 1725 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA, 0) |
|
1726 | 1726 | for question in msg.questions: |
|
1727 | 1727 | out.addQuestion(question) |
|
1728 | 1728 | |
|
1729 | 1729 | for question in msg.questions: |
|
1730 | 1730 | if question.type == _TYPE_PTR: |
|
1731 | 1731 | if question.name == b"_services._dns-sd._udp.local.": |
|
1732 | 1732 | for stype in self.servicetypes.keys(): |
|
1733 | 1733 | if out is None: |
|
1734 | 1734 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1735 | 1735 | out.addAnswer( |
|
1736 | 1736 | msg, |
|
1737 | 1737 | DNSPointer( |
|
1738 | 1738 | b"_services._dns-sd._udp.local.", |
|
1739 | 1739 | _TYPE_PTR, |
|
1740 | 1740 | _CLASS_IN, |
|
1741 | 1741 | _DNS_TTL, |
|
1742 | 1742 | stype, |
|
1743 | 1743 | ), |
|
1744 | 1744 | ) |
|
1745 | 1745 | for service in self.services.values(): |
|
1746 | 1746 | if question.name == service.type: |
|
1747 | 1747 | if out is None: |
|
1748 | 1748 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1749 | 1749 | out.addAnswer( |
|
1750 | 1750 | msg, |
|
1751 | 1751 | DNSPointer( |
|
1752 | 1752 | service.type, |
|
1753 | 1753 | _TYPE_PTR, |
|
1754 | 1754 | _CLASS_IN, |
|
1755 | 1755 | _DNS_TTL, |
|
1756 | 1756 | service.name, |
|
1757 | 1757 | ), |
|
1758 | 1758 | ) |
|
1759 | 1759 | else: |
|
1760 | 1760 | try: |
|
1761 | 1761 | if out is None: |
|
1762 | 1762 | out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA) |
|
1763 | 1763 | |
|
1764 | 1764 | # Answer A record queries for any service addresses we know |
|
1765 | 1765 | if question.type == _TYPE_A or question.type == _TYPE_ANY: |
|
1766 | 1766 | for service in self.services.values(): |
|
1767 | 1767 | if service.server == question.name.lower(): |
|
1768 | 1768 | out.addAnswer( |
|
1769 | 1769 | msg, |
|
1770 | 1770 | DNSAddress( |
|
1771 | 1771 | question.name, |
|
1772 | 1772 | _TYPE_A, |
|
1773 | 1773 | _CLASS_IN | _CLASS_UNIQUE, |
|
1774 | 1774 | _DNS_TTL, |
|
1775 | 1775 | service.address, |
|
1776 | 1776 | ), |
|
1777 | 1777 | ) |
|
1778 | 1778 | |
|
1779 | 1779 | service = self.services.get(question.name.lower(), None) |
|
1780 | 1780 | if not service: |
|
1781 | 1781 | continue |
|
1782 | 1782 | |
|
1783 | 1783 | if question.type == _TYPE_SRV or question.type == _TYPE_ANY: |
|
1784 | 1784 | out.addAnswer( |
|
1785 | 1785 | msg, |
|
1786 | 1786 | DNSService( |
|
1787 | 1787 | question.name, |
|
1788 | 1788 | _TYPE_SRV, |
|
1789 | 1789 | _CLASS_IN | _CLASS_UNIQUE, |
|
1790 | 1790 | _DNS_TTL, |
|
1791 | 1791 | service.priority, |
|
1792 | 1792 | service.weight, |
|
1793 | 1793 | service.port, |
|
1794 | 1794 | service.server, |
|
1795 | 1795 | ), |
|
1796 | 1796 | ) |
|
1797 | 1797 | if question.type == _TYPE_TXT or question.type == _TYPE_ANY: |
|
1798 | 1798 | out.addAnswer( |
|
1799 | 1799 | msg, |
|
1800 | 1800 | DNSText( |
|
1801 | 1801 | question.name, |
|
1802 | 1802 | _TYPE_TXT, |
|
1803 | 1803 | _CLASS_IN | _CLASS_UNIQUE, |
|
1804 | 1804 | _DNS_TTL, |
|
1805 | 1805 | service.text, |
|
1806 | 1806 | ), |
|
1807 | 1807 | ) |
|
1808 | 1808 | if question.type == _TYPE_SRV: |
|
1809 | 1809 | out.addAdditionalAnswer( |
|
1810 | 1810 | DNSAddress( |
|
1811 | 1811 | service.server, |
|
1812 | 1812 | _TYPE_A, |
|
1813 | 1813 | _CLASS_IN | _CLASS_UNIQUE, |
|
1814 | 1814 | _DNS_TTL, |
|
1815 | 1815 | service.address, |
|
1816 | 1816 | ) |
|
1817 | 1817 | ) |
|
1818 | 1818 | except Exception: |
|
1819 | 1819 | traceback.print_exc() |
|
1820 | 1820 | |
|
1821 | 1821 | if out is not None and out.answers: |
|
1822 | 1822 | out.id = msg.id |
|
1823 | 1823 | self.send(out, addr, port) |
|
1824 | 1824 | |
|
1825 | 1825 | def send(self, out, addr=_MDNS_ADDR, port=_MDNS_PORT): |
|
1826 | 1826 | """Sends an outgoing packet.""" |
|
1827 | 1827 | # This is a quick test to see if we can parse the packets we generate |
|
1828 | 1828 | # temp = DNSIncoming(out.packet()) |
|
1829 | 1829 | try: |
|
1830 | 1830 | self.socket.sendto(out.packet(), 0, (addr, port)) |
|
1831 | 1831 | except Exception: |
|
1832 | 1832 | # Ignore this, it may be a temporary loss of network connection |
|
1833 | 1833 | pass |
|
1834 | 1834 | |
|
1835 | 1835 | def close(self): |
|
1836 | 1836 | """Ends the background threads and prevents this instance from
|
1837 | 1837 | servicing further queries.""" |
|
1838 | 1838 | if globals()[b'_GLOBAL_DONE'] == 0: |
|
1839 | 1839 | globals()[b'_GLOBAL_DONE'] = 1 |
|
1840 | 1840 | self.notifyAll() |
|
1841 | 1841 | self.engine.notify() |
|
1842 | 1842 | self.unregisterAllServices() |
|
1843 | 1843 | self.socket.setsockopt( |
|
1844 | 1844 | socket.SOL_IP, |
|
1845 | 1845 | socket.IP_DROP_MEMBERSHIP, |
|
1846 | 1846 | socket.inet_aton(_MDNS_ADDR) + socket.inet_aton('0.0.0.0'), |
|
1847 | 1847 | ) |
|
1848 | 1848 | self.socket.close() |
|
1849 | 1849 | |
|
1850 | 1850 | |
|
1851 | 1851 | # Test a few module features, including service registration, service |
|
1852 | 1852 | # query (for Zoe), and service unregistration. |
|
1853 | 1853 | |
|
1854 | 1854 | if __name__ == '__main__': |
|
1855 | 1855 | print(b"Multicast DNS Service Discovery for Python, version", __version__) |
|
1856 | 1856 | r = Zeroconf() |
|
1857 | 1857 | print(b"1. Testing registration of a service...") |
|
1858 | 1858 | desc = {b'version': b'0.10', b'a': b'test value', b'b': b'another value'} |
|
1859 | 1859 | info = ServiceInfo( |
|
1860 | 1860 | b"_http._tcp.local.", |
|
1861 | 1861 | b"My Service Name._http._tcp.local.", |
|
1862 | 1862 | socket.inet_aton("127.0.0.1"), |
|
1863 | 1863 | 1234, |
|
1864 | 1864 | 0, |
|
1865 | 1865 | 0, |
|
1866 | 1866 | desc, |
|
1867 | 1867 | ) |
|
1868 | 1868 | print(b" Registering service...") |
|
1869 | 1869 | r.registerService(info) |
|
1870 | 1870 | print(b" Registration done.") |
|
1871 | 1871 | print(b"2. Testing query of service information...") |
|
1872 | 1872 | print( |
|
1873 | 1873 | b" Getting ZOE service:", |
|
1874 | 1874 | str(r.getServiceInfo(b"_http._tcp.local.", b"ZOE._http._tcp.local.")), |
|
1875 | 1875 | ) |
|
1876 | 1876 | print(b" Query done.") |
|
1877 | 1877 | print(b"3. Testing query of own service...") |
|
1878 | 1878 | print( |
|
1879 | 1879 | b" Getting self:", |
|
1880 | 1880 | str( |
|
1881 | 1881 | r.getServiceInfo( |
|
1882 | 1882 | b"_http._tcp.local.", b"My Service Name._http._tcp.local." |
|
1883 | 1883 | ) |
|
1884 | 1884 | ), |
|
1885 | 1885 | ) |
|
1886 | 1886 | print(b" Query done.") |
|
1887 | 1887 | print(b"4. Testing unregister of service information...") |
|
1888 | 1888 | r.unregisterService(info) |
|
1889 | 1889 | print(b" Unregister done.") |
|
1890 | 1890 | r.close() |
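The self-test above registers and unregisters a service in sequence; when using the module as a library it is probably safer to guard cleanup with try/finally so the multicast membership is always dropped. A hedged sketch (the service type, name, address and port are invented):

    zc = Zeroconf()
    svc = ServiceInfo(
        b"_example._tcp.local.",           # hypothetical service type
        b"demo._example._tcp.local.",      # hypothetical instance name
        socket.inet_aton("192.0.2.10"),    # documentation address, RFC 5737
        8000, 0, 0,
        {b'path': b'/'},
    )
    try:
        zc.registerService(svc)
        # ... serve until shutdown ...
    finally:
        zc.unregisterService(svc)
        zc.close()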
@@ -1,525 +1,525 b'' | |||
|
1 | 1 | # hgweb/hgweb_mod.py - Web interface for a repository. |
|
2 | 2 | # |
|
3 | 3 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
4 | 4 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
5 | 5 | # |
|
6 | 6 | # This software may be used and distributed according to the terms of the |
|
7 | 7 | # GNU General Public License version 2 or any later version. |
|
8 | 8 | |
|
9 | 9 | |
|
10 | 10 | import contextlib |
|
11 | 11 | import os |
|
12 | 12 | |
|
13 | 13 | from .common import ( |
|
14 | 14 | ErrorResponse, |
|
15 | 15 | HTTP_BAD_REQUEST, |
|
16 | 16 | cspvalues, |
|
17 | 17 | permhooks, |
|
18 | 18 | statusmessage, |
|
19 | 19 | ) |
|
20 | 20 | |
|
21 | 21 | from .. import ( |
|
22 | 22 | encoding, |
|
23 | 23 | error, |
|
24 | 24 | extensions, |
|
25 | 25 | formatter, |
|
26 | 26 | hg, |
|
27 | 27 | hook, |
|
28 | 28 | profiling, |
|
29 | 29 | pycompat, |
|
30 | 30 | registrar, |
|
31 | 31 | repoview, |
|
32 | 32 | templatefilters, |
|
33 | 33 | templater, |
|
34 | 34 | templateutil, |
|
35 | 35 | ui as uimod, |
|
36 | 36 | wireprotoserver, |
|
37 | 37 | ) |
|
38 | 38 | |
|
39 | 39 | from . import ( |
|
40 | 40 | common, |
|
41 | 41 | request as requestmod, |
|
42 | 42 | webcommands, |
|
43 | 43 | webutil, |
|
44 | 44 | wsgicgi, |
|
45 | 45 | ) |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | def getstyle(req, configfn, templatepath): |
|
49 | 49 | styles = ( |
|
50 | 50 | req.qsparams.get(b'style', None), |
|
51 | 51 | configfn(b'web', b'style'), |
|
52 | 52 | b'paper', |
|
53 | 53 | ) |
|
54 | 54 | return styles, _stylemap(styles, templatepath) |
|
55 | 55 | |
|
56 | 56 | |
|
57 | 57 | def _stylemap(styles, path=None): |
|
58 | 58 | """Return path to mapfile for a given style. |
|
59 | 59 | |
|
60 | 60 | Searches for the map file in the following locations:
|
61 | 61 | 1. templatepath/style/map |
|
62 | 62 | 2. templatepath/map-style |
|
63 | 63 | 3. templatepath/map |
|
64 | 64 | """ |
|
65 | 65 | |
|
66 | 66 | for style in styles: |
|
67 | 67 | # only plain name is allowed to honor template paths |
|
68 | 68 | if ( |
|
69 | 69 | not style |
|
70 | 70 | or style in (pycompat.oscurdir, pycompat.ospardir) |
|
71 | 71 | or pycompat.ossep in style |
|
72 | 72 | or pycompat.osaltsep |
|
73 | 73 | and pycompat.osaltsep in style |
|
74 | 74 | ): |
|
75 | 75 | continue |
|
76 | 76 | locations = (os.path.join(style, b'map'), b'map-' + style, b'map') |
|
77 | 77 | |
|
78 | 78 | for location in locations: |
|
79 | 79 | mapfile, fp = templater.try_open_template(location, path) |
|
80 | 80 | if mapfile: |
|
81 | 81 | return style, mapfile, fp |
|
82 | 82 | |
|
83 | 83 | raise RuntimeError(b"No hgweb templates found in %r" % path) |
|
84 | 84 | |
|
85 | 85 | |
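To make the search order above concrete, here is a small illustration of the candidate locations computed for one style; the style name is invented, and each candidate is resolved against the template path by templater.try_open_template() as in the loop above:

    import os

    style = b'gitweb'   # hypothetical style name
    locations = (os.path.join(style, b'map'), b'map-' + style, b'map')
    # -> (b'gitweb/map', b'map-gitweb', b'map'); the first candidate that
    #    opens under the template path wins, otherwise the next entry in
    #    `styles` is tried, and RuntimeError is raised if nothing matches.
    print(locations)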
|
86 | 86 | def makebreadcrumb(url, prefix=b''): |
|
87 | 87 | """Return a 'URL breadcrumb' list |
|
88 | 88 | |
|
89 | 89 | A 'URL breadcrumb' is a list of URL-name pairs, |
|
90 | 90 | corresponding to each of the path items on a URL. |
|
91 | 91 | This can be used to create path navigation entries. |
|
92 | 92 | """ |
|
93 | 93 | if url.endswith(b'/'): |
|
94 | 94 | url = url[:-1] |
|
95 | 95 | if prefix: |
|
96 | 96 | url = b'/' + prefix + url |
|
97 | 97 | relpath = url |
|
98 | 98 | if relpath.startswith(b'/'): |
|
99 | 99 | relpath = relpath[1:] |
|
100 | 100 | |
|
101 | 101 | breadcrumb = [] |
|
102 | 102 | urlel = url |
|
103 | 103 | pathitems = [b''] + relpath.split(b'/') |
|
104 | 104 | for pathel in reversed(pathitems): |
|
105 | 105 | if not pathel or not urlel: |
|
106 | 106 | break |
|
107 | 107 | breadcrumb.append({b'url': urlel, b'name': pathel}) |
|
108 | 108 | urlel = os.path.dirname(urlel) |
|
109 | 109 | return templateutil.mappinglist(reversed(breadcrumb)) |
|
110 | 110 | |
|
111 | 111 | |
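A worked example of the loop above, for a hypothetical request path and no prefix; the list built before being wrapped in templateutil.mappinglist() would be:

    # makebreadcrumb(b'/hg/proj/subdir') yields mappings equivalent to:
    #   {b'url': b'/hg',             b'name': b'hg'}
    #   {b'url': b'/hg/proj',        b'name': b'proj'}
    #   {b'url': b'/hg/proj/subdir', b'name': b'subdir'}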
|
112 | 112 | class requestcontext: |
|
113 | 113 | """Holds state/context for an individual request. |
|
114 | 114 | |
|
115 | 115 | Servers can be multi-threaded. Holding state on the WSGI application |
|
116 | 116 | is prone to race conditions. Instances of this class exist to hold |
|
117 | 117 | mutable and race-free state for requests. |
|
118 | 118 | """ |
|
119 | 119 | |
|
120 | 120 | def __init__(self, app, repo, req, res): |
|
121 | 121 | self.repo = repo |
|
122 | 122 | self.reponame = app.reponame |
|
123 | 123 | self.req = req |
|
124 | 124 | self.res = res |
|
125 | 125 | |
|
126 | 126 | # Only works if the filter actually supports being upgraded to show
|
127 | 127 | # visible changesets |
|
128 | 128 | current_filter = repo.filtername |
|
129 | 129 | if ( |
|
130 | 130 | common.hashiddenaccess(repo, req) |
|
131 | 131 | and current_filter is not None |
|
132 | 132 | and current_filter + b'.hidden' in repoview.filtertable |
|
133 | 133 | ): |
|
134 | 134 | self.repo = self.repo.filtered(repo.filtername + b'.hidden') |
|
135 | 135 | |
|
136 | 136 | self.maxchanges = self.configint(b'web', b'maxchanges') |
|
137 | 137 | self.stripecount = self.configint(b'web', b'stripes') |
|
138 | 138 | self.maxshortchanges = self.configint(b'web', b'maxshortchanges') |
|
139 | 139 | self.maxfiles = self.configint(b'web', b'maxfiles') |
|
140 | 140 | self.allowpull = self.configbool(b'web', b'allow-pull') |
|
141 | 141 | |
|
142 | 142 | # we use untrusted=False to prevent a repo owner from using |
|
143 | 143 | # web.templates in .hg/hgrc to get access to any file readable |
|
144 | 144 | # by the user running the CGI script |
|
145 | 145 | self.templatepath = self.config(b'web', b'templates', untrusted=False) |
|
146 | 146 | |
|
147 | 147 | # This object is more expensive to build than simple config values. |
|
148 | 148 | # It is shared across requests. The app will replace the object |
|
149 | 149 | # if it is updated. Since this is a reference and nothing should |
|
150 | 150 | # modify the underlying object, it should be constant for the lifetime |
|
151 | 151 | # of the request. |
|
152 | 152 | self.websubtable = app.websubtable |
|
153 | 153 | |
|
154 | 154 | self.csp, self.nonce = cspvalues(self.repo.ui) |
|
155 | 155 | |
|
156 | 156 | # Trust the settings from the .hg/hgrc files by default. |
|
157 | 157 | def config(self, *args, **kwargs): |
|
158 | 158 | kwargs.setdefault('untrusted', True) |
|
159 | 159 | return self.repo.ui.config(*args, **kwargs) |
|
160 | 160 | |
|
161 | 161 | def configbool(self, *args, **kwargs): |
|
162 | 162 | kwargs.setdefault('untrusted', True) |
|
163 | 163 | return self.repo.ui.configbool(*args, **kwargs) |
|
164 | 164 | |
|
165 | 165 | def configint(self, *args, **kwargs): |
|
166 | 166 | kwargs.setdefault('untrusted', True) |
|
167 | 167 | return self.repo.ui.configint(*args, **kwargs) |
|
168 | 168 | |
|
169 | 169 | def configlist(self, *args, **kwargs): |
|
170 | 170 | kwargs.setdefault('untrusted', True) |
|
171 | 171 | return self.repo.ui.configlist(*args, **kwargs) |
|
172 | 172 | |
|
173 | 173 | def archivelist(self, nodeid): |
|
174 | 174 | return webutil.archivelist(self.repo.ui, nodeid) |
|
175 | 175 | |
|
176 | 176 | def templater(self, req): |
|
177 | 177 | # determine scheme, port and server name |
|
178 | 178 | # this is needed to create absolute urls |
|
179 | 179 | logourl = self.config(b'web', b'logourl') |
|
180 | 180 | logoimg = self.config(b'web', b'logoimg') |
|
181 | 181 | staticurl = ( |
|
182 | 182 | self.config(b'web', b'staticurl') |
|
183 | 183 | or req.apppath.rstrip(b'/') + b'/static/' |
|
184 | 184 | ) |
|
185 | 185 | if not staticurl.endswith(b'/'): |
|
186 | 186 | staticurl += b'/' |
|
187 | 187 | |
|
188 | 188 | # figure out which style to use |
|
189 | 189 | |
|
190 | 190 | vars = {} |
|
191 | 191 | styles, (style, mapfile, fp) = getstyle( |
|
192 | 192 | req, self.config, self.templatepath |
|
193 | 193 | ) |
|
194 | 194 | if style == styles[0]: |
|
195 | 195 | vars[b'style'] = style |
|
196 | 196 | |
|
197 | 197 | sessionvars = webutil.sessionvars(vars, b'?') |
|
198 | 198 | |
|
199 | 199 | if not self.reponame: |
|
200 | 200 | self.reponame = ( |
|
201 | 201 | self.config(b'web', b'name', b'') |
|
202 | 202 | or req.reponame |
|
203 | 203 | or req.apppath |
|
204 | 204 | or self.repo.root |
|
205 | 205 | ) |
|
206 | 206 | |
|
207 | 207 | filters = {} |
|
208 | 208 | templatefilter = registrar.templatefilter(filters) |
|
209 | 209 | |
|
210 | 210 | @templatefilter(b'websub', intype=bytes) |
|
211 | 211 | def websubfilter(text): |
|
212 | 212 | return templatefilters.websub(text, self.websubtable) |
|
213 | 213 | |
|
214 | 214 | # create the templater |
|
215 | 215 | # TODO: export all keywords: defaults = templatekw.keywords.copy() |
|
216 | 216 | defaults = { |
|
217 | 217 | b'url': req.apppath + b'/', |
|
218 | 218 | b'logourl': logourl, |
|
219 | 219 | b'logoimg': logoimg, |
|
220 | 220 | b'staticurl': staticurl, |
|
221 | 221 | b'urlbase': req.advertisedbaseurl, |
|
222 | 222 | b'repo': self.reponame, |
|
223 | 223 | b'encoding': encoding.encoding, |
|
224 | 224 | b'sessionvars': sessionvars, |
|
225 | 225 | b'pathdef': makebreadcrumb(req.apppath), |
|
226 | 226 | b'style': style, |
|
227 | 227 | b'nonce': self.nonce, |
|
228 | 228 | } |
|
229 | 229 | templatekeyword = registrar.templatekeyword(defaults) |
|
230 | 230 | |
|
231 | 231 | @templatekeyword(b'motd', requires=()) |
|
232 | 232 | def motd(context, mapping): |
|
233 | 233 | yield self.config(b'web', b'motd') |
|
234 | 234 | |
|
235 | 235 | tres = formatter.templateresources(self.repo.ui, self.repo) |
|
236 | 236 | return templater.templater.frommapfile( |
|
237 | 237 | mapfile, fp=fp, filters=filters, defaults=defaults, resources=tres |
|
238 | 238 | ) |
|
239 | 239 | |
|
240 | 240 | def sendtemplate(self, name, **kwargs): |
|
241 | 241 | """Helper function to send a response generated from a template.""" |
|
242 | 242 | if self.req.method != b'HEAD': |
|
243 | 243 | kwargs = pycompat.byteskwargs(kwargs) |
|
244 | 244 | self.res.setbodygen(self.tmpl.generate(name, kwargs)) |
|
245 | 245 | return self.res.sendresponse() |
|
246 | 246 | |
|
247 | 247 | |
|
248 | 248 | class hgweb: |
|
249 | 249 | """HTTP server for individual repositories. |
|
250 | 250 | |
|
251 | 251 | Instances of this class serve HTTP responses for a particular |
|
252 | 252 | repository. |
|
253 | 253 | |
|
254 | 254 | Instances are typically used as WSGI applications. |
|
255 | 255 | |
|
256 | 256 | Some servers are multi-threaded. On these servers, there may |
|
257 | 257 | be multiple active threads inside __call__. |
|
258 | 258 | """ |
|
259 | 259 | |
|
260 | 260 | def __init__(self, repo, name=None, baseui=None): |
|
261 | 261 | if isinstance(repo, bytes): |
|
262 | 262 | if baseui: |
|
263 | 263 | u = baseui.copy() |
|
264 | 264 | else: |
|
265 | 265 | u = uimod.ui.load() |
|
266 | 266 | extensions.loadall(u) |
|
267 | 267 | extensions.populateui(u) |
|
268 | 268 | r = hg.repository(u, repo) |
|
269 | 269 | else: |
|
270 | 270 | # we trust caller to give us a private copy |
|
271 | 271 | r = repo |
|
272 | 272 | |
|
273 | 273 | r.ui.setconfig(b'ui', b'report_untrusted', b'off', b'hgweb') |
|
274 | 274 | r.baseui.setconfig(b'ui', b'report_untrusted', b'off', b'hgweb') |
|
275 | 275 | r.ui.setconfig(b'ui', b'nontty', b'true', b'hgweb') |
|
276 | 276 | r.baseui.setconfig(b'ui', b'nontty', b'true', b'hgweb') |
|
277 | 277 | # resolve file patterns relative to repo root |
|
278 | 278 | r.ui.setconfig(b'ui', b'forcecwd', r.root, b'hgweb') |
|
279 | 279 | r.baseui.setconfig(b'ui', b'forcecwd', r.root, b'hgweb') |
|
280 | 280 | # it's unlikely that we can replace signal handlers in a WSGI server,
|
281 | 281 | # and mod_wsgi issues a big warning. A plain hgweb process (with no
|
282 | 282 | # threading) could replace signal handlers, but we don't bother |
|
283 | 283 | # conditionally enabling it. |
|
284 | 284 | r.ui.setconfig(b'ui', b'signal-safe-lock', b'false', b'hgweb') |
|
285 | 285 | r.baseui.setconfig(b'ui', b'signal-safe-lock', b'false', b'hgweb') |
|
286 | 286 | # displaying a bundling progress bar while serving feels wrong and may
|
287 | 287 | # break some wsgi implementations.
|
288 | 288 | r.ui.setconfig(b'progress', b'disable', b'true', b'hgweb') |
|
289 | 289 | r.baseui.setconfig(b'progress', b'disable', b'true', b'hgweb') |
|
290 | 290 | self._repos = [hg.cachedlocalrepo(self._webifyrepo(r))] |
|
291 | 291 | self._lastrepo = self._repos[0] |
|
292 | 292 | hook.redirect(True) |
|
293 | 293 | self.reponame = name |
|
294 | 294 | |
|
295 | 295 | def _webifyrepo(self, repo): |
|
296 | 296 | repo = getwebview(repo) |
|
297 | 297 | self.websubtable = webutil.getwebsubs(repo) |
|
298 | 298 | return repo |
|
299 | 299 | |
|
300 | 300 | @contextlib.contextmanager |
|
301 | 301 | def _obtainrepo(self): |
|
302 | 302 | """Obtain a repo unique to the caller. |
|
303 | 303 | |
|
304 | 304 | Internally we maintain a stack of cachedlocalrepo instances |
|
305 | 305 | to be handed out. If one is available, we pop it and return it, |
|
306 | 306 | ensuring it is up to date in the process. If one is not available, |
|
307 | 307 | we clone the most recently used repo instance and return it. |
|
308 | 308 | |
|
309 | 309 | It is currently possible for the stack to grow without bounds |
|
310 | 310 | if the server allows infinite threads. However, servers should |
|
311 | 311 | have a thread limit, thus establishing our limit. |
|
312 | 312 | """ |
|
313 | 313 | if self._repos: |
|
314 | 314 | cached = self._repos.pop() |
|
315 | 315 | r, created = cached.fetch() |
|
316 | 316 | else: |
|
317 | 317 | cached = self._lastrepo.copy() |
|
318 | 318 | r, created = cached.fetch() |
|
319 | 319 | if created: |
|
320 | 320 | r = self._webifyrepo(r) |
|
321 | 321 | |
|
322 | 322 | self._lastrepo = cached |
|
323 | 323 | self.mtime = cached.mtime |
|
324 | 324 | try: |
|
325 | 325 | yield r |
|
326 | 326 | finally: |
|
327 | 327 | self._repos.append(cached) |
|
328 | 328 | |
|
329 | 329 | def run(self): |
|
330 | 330 | """Start a server from CGI environment. |
|
331 | 331 | |
|
332 | 332 | Modern servers should be using WSGI and should avoid this |
|
333 | 333 | method, if possible. |
|
334 | 334 | """ |
|
335 | 335 | if not encoding.environ.get(b'GATEWAY_INTERFACE', b'').startswith( |
|
336 | 336 | b"CGI/1." |
|
337 | 337 | ): |
|
338 | 338 | raise RuntimeError( |
|
339 | 339 | b"This function is only intended to be " |
|
340 | 340 | b"called while running as a CGI script." |
|
341 | 341 | ) |
|
342 | 342 | wsgicgi.launch(self) |
|
343 | 343 | |
|
344 | 344 | def __call__(self, env, respond): |
|
345 | 345 | """Run the WSGI application. |
|
346 | 346 | |
|
347 | 347 | This may be called by multiple threads. |
|
348 | 348 | """ |
|
349 | 349 | req = requestmod.parserequestfromenv(env) |
|
350 | 350 | res = requestmod.wsgiresponse(req, respond) |
|
351 | 351 | |
|
352 | 352 | return self.run_wsgi(req, res) |
|
353 | 353 | |
|
354 | 354 | def run_wsgi(self, req, res): |
|
355 | 355 | """Internal method to run the WSGI application. |
|
356 | 356 | |
|
357 | 357 | This is typically only called by Mercurial. External consumers |
|
358 | 358 | should be using instances of this class as the WSGI application. |
|
359 | 359 | """ |
|
360 | 360 | with self._obtainrepo() as repo: |
|
361 | 361 | profile = repo.ui.configbool(b'profiling', b'enabled') |
|
362 | 362 | with profiling.profile(repo.ui, enabled=profile): |
|
363 | 363 | for r in self._runwsgi(req, res, repo): |
|
364 | 364 | yield r |
|
365 | 365 | |
|
366 | 366 | def _runwsgi(self, req, res, repo): |
|
367 | 367 | rctx = requestcontext(self, repo, req, res) |
|
368 | 368 | |
|
369 | 369 | # This state is global across all threads. |
|
370 | 370 | encoding.encoding = rctx.config(b'web', b'encoding') |
|
371 | 371 | rctx.repo.ui.environ = req.rawenv |
|
372 | 372 | |
|
373 | 373 | if rctx.csp: |
|
374 | 374 | # hgwebdir may have added CSP header. Since we generate our own, |
|
375 | 375 | # replace it. |
|
376 | 376 | res.headers[b'Content-Security-Policy'] = rctx.csp |
|
377 | 377 | |
|
378 | 378 | handled = wireprotoserver.handlewsgirequest( |
|
379 | 379 | rctx, req, res, self.check_perm |
|
380 | 380 | ) |
|
381 | 381 | if handled: |
|
382 | 382 | return res.sendresponse() |
|
383 | 383 | |
|
384 | 384 | # Old implementations of hgweb supported dispatching the request via |
|
385 | 385 | # the initial query string parameter instead of using PATH_INFO. |
|
386 | 386 | # If PATH_INFO is present (signaled by ``req.dispatchpath`` having |
|
387 | 387 | # a value), we use it. Otherwise fall back to the query string. |
|
388 | 388 | if req.dispatchpath is not None: |
|
389 | 389 | query = req.dispatchpath |
|
390 | 390 | else: |
|
391 | 391 | query = req.querystring.partition(b'&')[0].partition(b';')[0] |
|
392 | 392 | |
|
393 | 393 | # translate user-visible url structure to internal structure |
|
394 | 394 | |
|
395 | 395 | args = query.split(b'/', 2) |
|
396 | 396 | if b'cmd' not in req.qsparams and args and args[0]: |
|
397 | 397 | cmd = args.pop(0) |
|
398 | 398 | style = cmd.rfind(b'-') |
|
399 | 399 | if style != -1: |
|
400 | 400 | req.qsparams[b'style'] = cmd[:style] |
|
401 | 401 | cmd = cmd[style + 1 :] |
|
402 | 402 | |
|
403 | 403 | # avoid accepting e.g. style parameter as command |
|
404 | 404 | if hasattr(webcommands, pycompat.sysstr(cmd)): |
|
405 | 405 | req.qsparams[b'cmd'] = cmd |
|
406 | 406 | |
|
407 | 407 | if cmd == b'static': |
|
408 | 408 | req.qsparams[b'file'] = b'/'.join(args) |
|
409 | 409 | else: |
|
410 | 410 | if args and args[0]: |
|
411 | 411 | node = args.pop(0).replace(b'%2F', b'/') |
|
412 | 412 | req.qsparams[b'node'] = node |
|
413 | 413 | if args: |
|
414 | 414 | if b'file' in req.qsparams: |
|
415 | 415 | del req.qsparams[b'file'] |
|
416 | 416 | for a in args: |
|
417 | 417 | req.qsparams.add(b'file', a) |
|
418 | 418 | |
|
419 | 419 | ua = req.headers.get(b'User-Agent', b'') |
|
420 | 420 | if cmd == b'rev' and b'mercurial' in ua: |
|
421 | 421 | req.qsparams[b'style'] = b'raw' |
|
422 | 422 | |
|
423 | 423 | if cmd == b'archive': |
|
424 | 424 | fn = req.qsparams[b'node'] |
|
425 | 425 | for type_, spec in webutil.archivespecs.items(): |
|
426 | 426 | ext = spec[2] |
|
427 | 427 | if fn.endswith(ext): |
|
428 | 428 | req.qsparams[b'node'] = fn[: -len(ext)] |
|
429 | 429 | req.qsparams[b'type'] = type_ |
|
430 | 430 | else: |
|
431 | 431 | cmd = req.qsparams.get(b'cmd', b'') |
|
432 | 432 | |
|
433 | 433 | # process the web interface request |
|
434 | 434 | |
|
435 | 435 | try: |
|
436 | 436 | rctx.tmpl = rctx.templater(req) |
|
437 | 437 | ctype = rctx.tmpl.render( |
|
438 | 438 | b'mimetype', {b'encoding': encoding.encoding} |
|
439 | 439 | ) |
|
440 | 440 | |
|
441 | 441 | # check read permissions on non-static content
|
442 | 442 | if cmd != b'static': |
|
443 | 443 | self.check_perm(rctx, req, None) |
|
444 | 444 | |
|
445 | 445 | if cmd == b'': |
|
446 | 446 | req.qsparams[b'cmd'] = rctx.tmpl.render(b'default', {}) |
|
447 | 447 | cmd = req.qsparams[b'cmd'] |
|
448 | 448 | |
|
449 | 449 | # Don't enable caching if using a CSP nonce because then it wouldn't |
|
450 | 450 | # be a nonce. |
|
451 | 451 | if rctx.configbool(b'web', b'cache') and not rctx.nonce: |
|
452 | 452 | tag = b'W/"%d"' % self.mtime |
|
453 | 453 | if req.headers.get(b'If-None-Match') == tag: |
|
454 | 454 | res.status = b'304 Not Modified' |
|
455 | 455 | # Content-Type may be defined globally. It isn't valid on a |
|
456 | 456 | # 304, so discard it. |
|
457 | 457 | try: |
|
458 | 458 | del res.headers[b'Content-Type'] |
|
459 | 459 | except KeyError: |
|
460 | 460 | pass |
|
461 | 461 | # Response body not allowed on 304. |
|
462 | 462 | res.setbodybytes(b'') |
|
463 | 463 | return res.sendresponse() |
|
464 | 464 | |
|
465 | 465 | res.headers[b'ETag'] = tag |
|
466 | 466 | |
|
467 | if cmd not in webcommands.__all__: | |
|
467 | if pycompat.sysstr(cmd) not in webcommands.__all__: | |
|
468 | 468 | msg = b'no such method: %s' % cmd |
|
469 | 469 | raise ErrorResponse(HTTP_BAD_REQUEST, msg) |
|
470 | 470 | else: |
|
471 | 471 | # Set some globals appropriate for web handlers. Commands can |
|
472 | 472 | # override easily enough. |
|
473 | 473 | res.status = b'200 Script output follows' |
|
474 | 474 | res.headers[b'Content-Type'] = ctype |
|
475 | 475 | return getattr(webcommands, pycompat.sysstr(cmd))(rctx) |
|
476 | 476 | |
|
477 | 477 | except (error.LookupError, error.RepoLookupError) as err: |
|
478 | 478 | msg = pycompat.bytestr(err) |
|
479 | 479 | if hasattr(err, 'name') and not isinstance( |
|
480 | 480 | err, error.ManifestLookupError |
|
481 | 481 | ): |
|
482 | 482 | msg = b'revision not found: %s' % err.name |
|
483 | 483 | |
|
484 | 484 | res.status = b'404 Not Found' |
|
485 | 485 | res.headers[b'Content-Type'] = ctype |
|
486 | 486 | return rctx.sendtemplate(b'error', error=msg) |
|
487 | 487 | except (error.RepoError, error.StorageError) as e: |
|
488 | 488 | res.status = b'500 Internal Server Error' |
|
489 | 489 | res.headers[b'Content-Type'] = ctype |
|
490 | 490 | return rctx.sendtemplate(b'error', error=pycompat.bytestr(e)) |
|
491 | 491 | except error.Abort as e: |
|
492 | 492 | res.status = b'403 Forbidden' |
|
493 | 493 | res.headers[b'Content-Type'] = ctype |
|
494 | 494 | return rctx.sendtemplate(b'error', error=e.message) |
|
495 | 495 | except ErrorResponse as e: |
|
496 | 496 | for k, v in e.headers: |
|
497 | 497 | res.headers[k] = v |
|
498 | 498 | res.status = statusmessage(e.code, pycompat.bytestr(e)) |
|
499 | 499 | res.headers[b'Content-Type'] = ctype |
|
500 | 500 | return rctx.sendtemplate(b'error', error=pycompat.bytestr(e)) |
|
501 | 501 | |
|
502 | 502 | def check_perm(self, rctx, req, op): |
|
503 | 503 | for permhook in permhooks: |
|
504 | 504 | permhook(rctx, req, op) |
|
505 | 505 | |
|
506 | 506 | |
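Instances of the hgweb class above are ordinary WSGI callables, so they can be mounted in any WSGI server. A minimal sketch, assuming a hypothetical byte-string repository path and the standard library's wsgiref server:

    from wsgiref.simple_server import make_server

    from mercurial.hgweb.hgweb_mod import hgweb

    application = hgweb(b'/path/to/repo', name=b'myrepo')  # hypothetical path
    httpd = make_server('localhost', 8000, application)    # hypothetical port
    httpd.serve_forever()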
|
507 | 507 | def getwebview(repo): |
|
508 | 508 | """The 'web.view' config controls the changeset filter for hgweb. Possible
|
509 | 509 | values are ``served``, ``visible`` and ``all``. Default is ``served``. |
|
510 | 510 | The ``served`` filter only shows changesets that can be pulled from the |
|
511 | 511 | hgweb instance. The ``visible`` filter includes secret changesets but
|
512 | 512 | still excludes "hidden" ones.
|
513 | 513 | |
|
514 | 514 | See the repoview module for details. |
|
515 | 515 | |
|
516 | 516 | The option has been around undocumented since Mercurial 2.5, but no |
|
517 | 517 | user ever asked about it. So we'd better keep it undocumented for now."""
|
518 | 518 | # experimental config: web.view |
|
519 | 519 | viewconfig = repo.ui.config(b'web', b'view', untrusted=True) |
|
520 | 520 | if viewconfig == b'all': |
|
521 | 521 | return repo.unfiltered() |
|
522 | 522 | elif viewconfig in repoview.filtertable: |
|
523 | 523 | return repo.filtered(viewconfig) |
|
524 | 524 | else: |
|
525 | 525 | return repo.filtered(b'served') |
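For reference, the mapping implemented above between the undocumented web.view values and repository views, assuming the standard filter names in repoview.filtertable:

    # [web] view = all      -> repo.unfiltered()          (no filtering)
    # [web] view = visible  -> repo.filtered(b'visible')  (includes secret changesets)
    # [web] view = served   -> repo.filtered(b'served')   (default; also the
    #                          fallback for any unrecognized value)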
@@ -1,1597 +1,1597 b'' | |||
|
1 | 1 | # |
|
2 | 2 | # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net> |
|
3 | 3 | # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com> |
|
4 | 4 | # |
|
5 | 5 | # This software may be used and distributed according to the terms of the |
|
6 | 6 | # GNU General Public License version 2 or any later version. |
|
7 | 7 | |
|
8 | 8 | |
|
9 | 9 | import copy |
|
10 | 10 | import mimetypes |
|
11 | 11 | import os |
|
12 | 12 | import re |
|
13 | 13 | |
|
14 | 14 | from ..i18n import _ |
|
15 | 15 | from ..node import hex, short |
|
16 | 16 | |
|
17 | 17 | from .common import ( |
|
18 | 18 | ErrorResponse, |
|
19 | 19 | HTTP_FORBIDDEN, |
|
20 | 20 | HTTP_NOT_FOUND, |
|
21 | 21 | get_contact, |
|
22 | 22 | paritygen, |
|
23 | 23 | staticfile, |
|
24 | 24 | ) |
|
25 | 25 | |
|
26 | 26 | from .. import ( |
|
27 | 27 | archival, |
|
28 | 28 | dagop, |
|
29 | 29 | encoding, |
|
30 | 30 | error, |
|
31 | 31 | graphmod, |
|
32 | 32 | pycompat, |
|
33 | 33 | revset, |
|
34 | 34 | revsetlang, |
|
35 | 35 | scmutil, |
|
36 | 36 | smartset, |
|
37 | 37 | templateutil, |
|
38 | 38 | ) |
|
39 | 39 | |
|
40 | 40 | from ..utils import stringutil |
|
41 | 41 | |
|
42 | 42 | from . import webutil |
|
43 | 43 | |
|
44 | 44 | __all__ = [] |
|
45 | 45 | commands = {} |
|
46 | 46 | |
|
47 | 47 | |
|
48 | 48 | class webcommand: |
|
49 | 49 | """Decorator used to register a web command handler. |
|
50 | 50 | |
|
51 | 51 | The decorator takes as its positional arguments the name/path the |
|
52 | 52 | command should be accessible under. |
|
53 | 53 | |
|
54 | 54 | When called, functions receive as arguments a ``requestcontext``, |
|
55 | 55 | ``wsgirequest``, and a templater instance for generating output.
|
56 | 56 | The functions should populate the ``rctx.res`` object with details |
|
57 | 57 | about the HTTP response. |
|
58 | 58 | |
|
59 | 59 | The function returns a generator to be consumed by the WSGI application. |
|
60 | 60 | For most commands, this should be the result from |
|
61 | 61 | ``web.res.sendresponse()``. Many commands will call ``web.sendtemplate()`` |
|
62 | 62 | to render a template. |
|
63 | 63 | |
|
64 | 64 | Usage: |
|
65 | 65 | |
|
66 | 66 | @webcommand('mycommand') |
|
67 | 67 | def mycommand(web): |
|
68 | 68 | pass |
|
69 | 69 | """ |
|
70 | 70 | |
|
71 | 71 | def __init__(self, name): |
|
72 | 72 | self.name = name |
|
73 | 73 | |
|
74 | 74 | def __call__(self, func): |
|
75 | __all__.append(self.name) | |
|
75 | __all__.append(pycompat.sysstr(self.name)) | |
|
76 | 76 | commands[self.name] = func |
|
77 | 77 | return func |
|
78 | 78 | |
|
79 | 79 | |
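Note the change above: the name passed to @webcommand stays a byte string as the key in the commands dict, but it is recorded in __all__ as a native string, which is why hgweb_mod now checks membership with pycompat.sysstr(cmd) before dispatching. A small sketch of the resulting invariant, reusing the hypothetical name from the class docstring:

    # after applying `@webcommand(b'mycommand')` to a handler `mycommand`:
    #   commands[b'mycommand'] is mycommand     # keyed by bytes
    #   'mycommand' in __all__                  # stored as a native str
    # so byte-string command names coming from the request are converted with
    # pycompat.sysstr() when they are checked against __all__ or dispatched
    # via getattr(webcommands, ...).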
|
80 | 80 | @webcommand(b'log') |
|
81 | 81 | def log(web): |
|
82 | 82 | """ |
|
83 | 83 | /log[/{revision}[/{path}]] |
|
84 | 84 | -------------------------- |
|
85 | 85 | |
|
86 | 86 | Show repository or file history. |
|
87 | 87 | |
|
88 | 88 | For URLs of the form ``/log/{revision}``, a list of changesets starting at |
|
89 | 89 | the specified changeset identifier is shown. If ``{revision}`` is not |
|
90 | 90 | defined, the default is ``tip``. This form is equivalent to the |
|
91 | 91 | ``changelog`` handler. |
|
92 | 92 | |
|
93 | 93 | For URLs of the form ``/log/{revision}/{file}``, the history for a specific |
|
94 | 94 | file will be shown. This form is equivalent to the ``filelog`` handler. |
|
95 | 95 | """ |
|
96 | 96 | |
|
97 | 97 | if web.req.qsparams.get(b'file'): |
|
98 | 98 | return filelog(web) |
|
99 | 99 | else: |
|
100 | 100 | return changelog(web) |
|
101 | 101 | |
|
102 | 102 | |
|
103 | 103 | @webcommand(b'rawfile') |
|
104 | 104 | def rawfile(web): |
|
105 | 105 | guessmime = web.configbool(b'web', b'guessmime') |
|
106 | 106 | |
|
107 | 107 | path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b'')) |
|
108 | 108 | if not path: |
|
109 | 109 | return manifest(web) |
|
110 | 110 | |
|
111 | 111 | try: |
|
112 | 112 | fctx = webutil.filectx(web.repo, web.req) |
|
113 | 113 | except error.LookupError as inst: |
|
114 | 114 | try: |
|
115 | 115 | return manifest(web) |
|
116 | 116 | except ErrorResponse: |
|
117 | 117 | raise inst |
|
118 | 118 | |
|
119 | 119 | path = fctx.path() |
|
120 | 120 | text = fctx.data() |
|
121 | 121 | mt = b'application/binary' |
|
122 | 122 | if guessmime: |
|
123 | 123 | mt = mimetypes.guess_type(pycompat.fsdecode(path))[0] |
|
124 | 124 | if mt is None: |
|
125 | 125 | if stringutil.binary(text): |
|
126 | 126 | mt = b'application/binary' |
|
127 | 127 | else: |
|
128 | 128 | mt = b'text/plain' |
|
129 | 129 | else: |
|
130 | 130 | mt = pycompat.sysbytes(mt) |
|
131 | 131 | |
|
132 | 132 | if mt.startswith(b'text/'): |
|
133 | 133 | mt += b'; charset="%s"' % encoding.encoding |
|
134 | 134 | |
|
135 | 135 | web.res.headers[b'Content-Type'] = mt |
|
136 | 136 | filename = ( |
|
137 | 137 | path.rpartition(b'/')[-1].replace(b'\\', b'\\\\').replace(b'"', b'\\"') |
|
138 | 138 | ) |
|
139 | 139 | web.res.headers[b'Content-Disposition'] = ( |
|
140 | 140 | b'inline; filename="%s"' % filename |
|
141 | 141 | ) |
|
142 | 142 | web.res.setbodybytes(text) |
|
143 | 143 | return web.res.sendresponse() |
|
144 | 144 | |
|
145 | 145 | |
|
146 | 146 | def _filerevision(web, fctx): |
|
147 | 147 | f = fctx.path() |
|
148 | 148 | text = fctx.data() |
|
149 | 149 | parity = paritygen(web.stripecount) |
|
150 | 150 | ishead = fctx.filenode() in fctx.filelog().heads() |
|
151 | 151 | |
|
152 | 152 | if stringutil.binary(text): |
|
153 | 153 | mt = pycompat.sysbytes( |
|
154 | 154 | mimetypes.guess_type(pycompat.fsdecode(f))[0] |
|
155 | 155 | or r'application/octet-stream' |
|
156 | 156 | ) |
|
157 | 157 | text = b'(binary:%s)' % mt |
|
158 | 158 | |
|
159 | 159 | def lines(context): |
|
160 | 160 | for lineno, t in enumerate(text.splitlines(True)): |
|
161 | 161 | yield { |
|
162 | 162 | b"line": t, |
|
163 | 163 | b"lineid": b"l%d" % (lineno + 1), |
|
164 | 164 | b"linenumber": b"% 6d" % (lineno + 1), |
|
165 | 165 | b"parity": next(parity), |
|
166 | 166 | } |
|
167 | 167 | |
|
168 | 168 | return web.sendtemplate( |
|
169 | 169 | b'filerevision', |
|
170 | 170 | file=f, |
|
171 | 171 | path=webutil.up(f), |
|
172 | 172 | text=templateutil.mappinggenerator(lines), |
|
173 | 173 | symrev=webutil.symrevorshortnode(web.req, fctx), |
|
174 | 174 | rename=webutil.renamelink(fctx), |
|
175 | 175 | permissions=fctx.manifest().flags(f), |
|
176 | 176 | ishead=int(ishead), |
|
177 | 177 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)) |
|
178 | 178 | ) |
|
179 | 179 | |
|
180 | 180 | |
|
181 | 181 | @webcommand(b'file') |
|
182 | 182 | def file(web): |
|
183 | 183 | """ |
|
184 | 184 | /file/{revision}[/{path}] |
|
185 | 185 | ------------------------- |
|
186 | 186 | |
|
187 | 187 | Show information about a directory or file in the repository. |
|
188 | 188 | |
|
189 | 189 | Info about the ``path`` given as a URL parameter will be rendered. |
|
190 | 190 | |
|
191 | 191 | If ``path`` is a directory, information about the entries in that |
|
192 | 192 | directory will be rendered. This form is equivalent to the ``manifest`` |
|
193 | 193 | handler. |
|
194 | 194 | |
|
195 | 195 | If ``path`` is a file, information about that file will be shown via |
|
196 | 196 | the ``filerevision`` template. |
|
197 | 197 | |
|
198 | 198 | If ``path`` is not defined, information about the root directory will |
|
199 | 199 | be rendered. |
|
200 | 200 | """ |
|
201 | 201 | if web.req.qsparams.get(b'style') == b'raw': |
|
202 | 202 | return rawfile(web) |
|
203 | 203 | |
|
204 | 204 | path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b'')) |
|
205 | 205 | if not path: |
|
206 | 206 | return manifest(web) |
|
207 | 207 | try: |
|
208 | 208 | return _filerevision(web, webutil.filectx(web.repo, web.req)) |
|
209 | 209 | except error.LookupError as inst: |
|
210 | 210 | try: |
|
211 | 211 | return manifest(web) |
|
212 | 212 | except ErrorResponse: |
|
213 | 213 | raise inst |
|
214 | 214 | |
|
215 | 215 | |
|
216 | 216 | def _search(web): |
|
217 | 217 | MODE_REVISION = b'rev' |
|
218 | 218 | MODE_KEYWORD = b'keyword' |
|
219 | 219 | MODE_REVSET = b'revset' |
|
220 | 220 | |
|
221 | 221 | def revsearch(ctx): |
|
222 | 222 | yield ctx |
|
223 | 223 | |
|
224 | 224 | def keywordsearch(query): |
|
225 | 225 | lower = encoding.lower |
|
226 | 226 | qw = lower(query).split() |
|
227 | 227 | |
|
228 | 228 | def revgen(): |
|
229 | 229 | cl = web.repo.changelog |
|
230 | 230 | for i in range(len(web.repo) - 1, 0, -100): |
|
231 | 231 | l = [] |
|
232 | 232 | for j in cl.revs(max(0, i - 99), i): |
|
233 | 233 | ctx = web.repo[j] |
|
234 | 234 | l.append(ctx) |
|
235 | 235 | l.reverse() |
|
236 | 236 | for e in l: |
|
237 | 237 | yield e |
|
238 | 238 | |
|
239 | 239 | for ctx in revgen(): |
|
240 | 240 | miss = 0 |
|
241 | 241 | for q in qw: |
|
242 | 242 | if not ( |
|
243 | 243 | q in lower(ctx.user()) |
|
244 | 244 | or q in lower(ctx.description()) |
|
245 | 245 | or q in lower(b" ".join(ctx.files())) |
|
246 | 246 | ): |
|
247 | 247 | miss = 1 |
|
248 | 248 | break |
|
249 | 249 | if miss: |
|
250 | 250 | continue |
|
251 | 251 | |
|
252 | 252 | yield ctx |
|
253 | 253 | |
|
254 | 254 | def revsetsearch(revs): |
|
255 | 255 | for r in revs: |
|
256 | 256 | yield web.repo[r] |
|
257 | 257 | |
|
258 | 258 | searchfuncs = { |
|
259 | 259 | MODE_REVISION: (revsearch, b'exact revision search'), |
|
260 | 260 | MODE_KEYWORD: (keywordsearch, b'literal keyword search'), |
|
261 | 261 | MODE_REVSET: (revsetsearch, b'revset expression search'), |
|
262 | 262 | } |
|
263 | 263 | |
|
264 | 264 | def getsearchmode(query): |
|
265 | 265 | try: |
|
266 | 266 | ctx = scmutil.revsymbol(web.repo, query) |
|
267 | 267 | except (error.RepoError, error.LookupError): |
|
268 | 268 | # query is not an exact revision pointer, need to |
|
269 | 269 | # decide if it's a revset expression or keywords |
|
270 | 270 | pass |
|
271 | 271 | else: |
|
272 | 272 | return MODE_REVISION, ctx |
|
273 | 273 | |
|
274 | 274 | revdef = b'reverse(%s)' % query |
|
275 | 275 | try: |
|
276 | 276 | tree = revsetlang.parse(revdef) |
|
277 | 277 | except error.ParseError: |
|
278 | 278 | # can't parse to a revset tree |
|
279 | 279 | return MODE_KEYWORD, query |
|
280 | 280 | |
|
281 | 281 | if revsetlang.depth(tree) <= 2: |
|
282 | 282 | # no revset syntax used |
|
283 | 283 | return MODE_KEYWORD, query |
|
284 | 284 | |
|
285 | 285 | if any( |
|
286 | 286 | (token, (value or b'')[:3]) == (b'string', b're:') |
|
287 | 287 | for token, value, pos in revsetlang.tokenize(revdef) |
|
288 | 288 | ): |
|
289 | 289 | return MODE_KEYWORD, query |
|
290 | 290 | |
|
291 | 291 | funcsused = revsetlang.funcsused(tree) |
|
292 | 292 | if not funcsused.issubset(revset.safesymbols): |
|
293 | 293 | return MODE_KEYWORD, query |
|
294 | 294 | |
|
295 | 295 | try: |
|
296 | 296 | mfunc = revset.match( |
|
297 | 297 | web.repo.ui, revdef, lookup=revset.lookupfn(web.repo) |
|
298 | 298 | ) |
|
299 | 299 | revs = mfunc(web.repo) |
|
300 | 300 | return MODE_REVSET, revs |
|
301 | 301 | # ParseError: wrongly placed tokens, wrongs arguments, etc |
|
302 | 302 | # RepoLookupError: no such revision, e.g. in 'revision:' |
|
303 | 303 | # Abort: bookmark/tag does not exist
|
304 | 304 | # LookupError: ambiguous identifier, e.g. in '(bc)' on a large repo |
|
305 | 305 | except ( |
|
306 | 306 | error.ParseError, |
|
307 | 307 | error.RepoLookupError, |
|
308 | 308 | error.Abort, |
|
309 | 309 | LookupError, |
|
310 | 310 | ): |
|
311 | 311 | return MODE_KEYWORD, query |
|
312 | 312 | |
|
313 | 313 | def changelist(context): |
|
314 | 314 | count = 0 |
|
315 | 315 | |
|
316 | 316 | for ctx in searchfunc[0](funcarg): |
|
317 | 317 | count += 1 |
|
318 | 318 | n = scmutil.binnode(ctx) |
|
319 | 319 | showtags = webutil.showtag(web.repo, b'changelogtag', n) |
|
320 | 320 | files = webutil.listfilediffs(ctx.files(), n, web.maxfiles) |
|
321 | 321 | |
|
322 | 322 | lm = webutil.commonentry(web.repo, ctx) |
|
323 | 323 | lm.update( |
|
324 | 324 | { |
|
325 | 325 | b'parity': next(parity), |
|
326 | 326 | b'changelogtag': showtags, |
|
327 | 327 | b'files': files, |
|
328 | 328 | } |
|
329 | 329 | ) |
|
330 | 330 | yield lm |
|
331 | 331 | |
|
332 | 332 | if count >= revcount: |
|
333 | 333 | break |
|
334 | 334 | |
|
335 | 335 | query = web.req.qsparams[b'rev'] |
|
336 | 336 | revcount = web.maxchanges |
|
337 | 337 | if b'revcount' in web.req.qsparams: |
|
338 | 338 | try: |
|
339 | 339 | revcount = int(web.req.qsparams.get(b'revcount', revcount)) |
|
340 | 340 | revcount = max(revcount, 1) |
|
341 | 341 | web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount |
|
342 | 342 | except ValueError: |
|
343 | 343 | pass |
|
344 | 344 | |
|
345 | 345 | lessvars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
346 | 346 | lessvars[b'revcount'] = max(revcount // 2, 1) |
|
347 | 347 | lessvars[b'rev'] = query |
|
348 | 348 | morevars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
349 | 349 | morevars[b'revcount'] = revcount * 2 |
|
350 | 350 | morevars[b'rev'] = query |
|
351 | 351 | |
|
352 | 352 | mode, funcarg = getsearchmode(query) |
|
353 | 353 | |
|
354 | 354 | if b'forcekw' in web.req.qsparams: |
|
355 | 355 | showforcekw = b'' |
|
356 | 356 | showunforcekw = searchfuncs[mode][1] |
|
357 | 357 | mode = MODE_KEYWORD |
|
358 | 358 | funcarg = query |
|
359 | 359 | else: |
|
360 | 360 | if mode != MODE_KEYWORD: |
|
361 | 361 | showforcekw = searchfuncs[MODE_KEYWORD][1] |
|
362 | 362 | else: |
|
363 | 363 | showforcekw = b'' |
|
364 | 364 | showunforcekw = b'' |
|
365 | 365 | |
|
366 | 366 | searchfunc = searchfuncs[mode] |
|
367 | 367 | |
|
368 | 368 | tip = web.repo[b'tip'] |
|
369 | 369 | parity = paritygen(web.stripecount) |
|
370 | 370 | |
|
371 | 371 | return web.sendtemplate( |
|
372 | 372 | b'search', |
|
373 | 373 | query=query, |
|
374 | 374 | node=tip.hex(), |
|
375 | 375 | symrev=b'tip', |
|
376 | 376 | entries=templateutil.mappinggenerator(changelist, name=b'searchentry'), |
|
377 | 377 | archives=web.archivelist(b'tip'), |
|
378 | 378 | morevars=morevars, |
|
379 | 379 | lessvars=lessvars, |
|
380 | 380 | modedesc=searchfunc[1], |
|
381 | 381 | showforcekw=showforcekw, |
|
382 | 382 | showunforcekw=showunforcekw, |
|
383 | 383 | ) |
|
384 | 384 | |
|
385 | 385 | |
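For readers poking at this handler from the client side: the search mode picked by ``getsearchmode`` can be overridden with the ``forcekw`` query parameter checked above, forcing a literal keyword search even when the query would parse as a revision or revset. A minimal sketch, assuming a placeholder ``hg serve`` instance on ``http://localhost:8000`` (the host, port, and query are illustrative, not part of this module):

    # Hypothetical client-side request against the changelog search endpoint.
    from urllib.parse import urlencode
    from urllib.request import urlopen

    base = 'http://localhost:8000/changelog'          # placeholder server
    params = {'rev': 'author(alice)', 'forcekw': '1'}
    with urlopen('%s?%s' % (base, urlencode(params))) as resp:
        html = resp.read()                            # rendered 'search' template
    print(len(html), 'bytes of search results')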
|
386 | 386 | @webcommand(b'changelog') |
|
387 | 387 | def changelog(web, shortlog=False): |
|
388 | 388 | """ |
|
389 | 389 | /changelog[/{revision}] |
|
390 | 390 | ----------------------- |
|
391 | 391 | |
|
392 | 392 | Show information about multiple changesets. |
|
393 | 393 | |
|
394 | 394 | If the optional ``revision`` URL argument is absent, information about |
|
395 | 395 | all changesets starting at ``tip`` will be rendered. If the ``revision`` |
|
396 | 396 | argument is present, changesets will be shown starting from the specified |
|
397 | 397 | revision. |
|
398 | 398 | |
|
399 | 399 | If ``revision`` is absent, the ``rev`` query string argument may be |
|
400 | 400 | defined. This will perform a search for changesets. |
|
401 | 401 | |
|
402 | 402 | The argument for ``rev`` can be a single revision, a revision set, |
|
403 | 403 | or a literal keyword to search for in changeset data (equivalent to |
|
404 | 404 | :hg:`log -k`). |
|
405 | 405 | |
|
406 | 406 | The ``revcount`` query string argument defines the maximum number of
|
407 | 407 | changesets to render. |
|
408 | 408 | |
|
409 | 409 | For non-searches, the ``changelog`` template will be rendered. |
|
410 | 410 | """ |
|
411 | 411 | |
|
412 | 412 | query = b'' |
|
413 | 413 | if b'node' in web.req.qsparams: |
|
414 | 414 | ctx = webutil.changectx(web.repo, web.req) |
|
415 | 415 | symrev = webutil.symrevorshortnode(web.req, ctx) |
|
416 | 416 | elif b'rev' in web.req.qsparams: |
|
417 | 417 | return _search(web) |
|
418 | 418 | else: |
|
419 | 419 | ctx = web.repo[b'tip'] |
|
420 | 420 | symrev = b'tip' |
|
421 | 421 | |
|
422 | 422 | def changelist(maxcount): |
|
423 | 423 | revs = [] |
|
424 | 424 | if pos != -1: |
|
425 | 425 | revs = web.repo.changelog.revs(pos, 0) |
|
426 | 426 | |
|
427 | 427 | for entry in webutil.changelistentries(web, revs, maxcount, parity): |
|
428 | 428 | yield entry |
|
429 | 429 | |
|
430 | 430 | if shortlog: |
|
431 | 431 | revcount = web.maxshortchanges |
|
432 | 432 | else: |
|
433 | 433 | revcount = web.maxchanges |
|
434 | 434 | |
|
435 | 435 | if b'revcount' in web.req.qsparams: |
|
436 | 436 | try: |
|
437 | 437 | revcount = int(web.req.qsparams.get(b'revcount', revcount)) |
|
438 | 438 | revcount = max(revcount, 1) |
|
439 | 439 | web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount |
|
440 | 440 | except ValueError: |
|
441 | 441 | pass |
|
442 | 442 | |
|
443 | 443 | lessvars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
444 | 444 | lessvars[b'revcount'] = max(revcount // 2, 1) |
|
445 | 445 | morevars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
446 | 446 | morevars[b'revcount'] = revcount * 2 |
|
447 | 447 | |
|
448 | 448 | count = len(web.repo) |
|
449 | 449 | pos = ctx.rev() |
|
450 | 450 | parity = paritygen(web.stripecount) |
|
451 | 451 | |
|
452 | 452 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) |
|
453 | 453 | |
|
454 | 454 | entries = list(changelist(revcount + 1)) |
|
455 | 455 | latestentry = entries[:1] |
|
456 | 456 | if len(entries) > revcount: |
|
457 | 457 | nextentry = entries[-1:] |
|
458 | 458 | entries = entries[:-1] |
|
459 | 459 | else: |
|
460 | 460 | nextentry = [] |
|
461 | 461 | |
|
462 | 462 | return web.sendtemplate( |
|
463 | 463 | b'shortlog' if shortlog else b'changelog', |
|
464 | 464 | changenav=changenav, |
|
465 | 465 | node=ctx.hex(), |
|
466 | 466 | rev=pos, |
|
467 | 467 | symrev=symrev, |
|
468 | 468 | changesets=count, |
|
469 | 469 | entries=templateutil.mappinglist(entries), |
|
470 | 470 | latestentry=templateutil.mappinglist(latestentry), |
|
471 | 471 | nextentry=templateutil.mappinglist(nextentry), |
|
472 | 472 | archives=web.archivelist(b'tip'), |
|
473 | 473 | revcount=revcount, |
|
474 | 474 | morevars=morevars, |
|
475 | 475 | lessvars=lessvars, |
|
476 | 476 | query=query, |
|
477 | 477 | ) |
|
478 | 478 | |
|
479 | 479 | |
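The paging above relies on a small idiom: ask ``changelist`` for ``revcount + 1`` entries, display ``revcount`` of them, and keep the extra one only as a marker that a next page exists. A generic stand-alone sketch of that idiom (not hgweb code, just the pattern):

    # Generic sketch of the "fetch one extra entry" paging idiom used above.
    def paginate(entries, revcount):
        entries = list(entries)[:revcount + 1]
        latestentry = entries[:1]
        if len(entries) > revcount:
            nextentry = entries[-1:]   # sentinel: another page exists
            entries = entries[:-1]
        else:
            nextentry = []             # history is exhausted
        return entries, latestentry, nextentry

    shown, latest, nxt = paginate(range(100), revcount=10)
    assert len(shown) == 10 and latest == [0] and nxt == [10]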
|
480 | 480 | @webcommand(b'shortlog') |
|
481 | 481 | def shortlog(web): |
|
482 | 482 | """ |
|
483 | 483 | /shortlog |
|
484 | 484 | --------- |
|
485 | 485 | |
|
486 | 486 | Show basic information about a set of changesets. |
|
487 | 487 | |
|
488 | 488 | This accepts the same parameters as the ``changelog`` handler. The only |
|
489 | 489 | difference is that the ``shortlog`` template will be rendered instead of the
|
490 | 490 | ``changelog`` template. |
|
491 | 491 | """ |
|
492 | 492 | return changelog(web, shortlog=True) |
|
493 | 493 | |
|
494 | 494 | |
|
495 | 495 | @webcommand(b'changeset') |
|
496 | 496 | def changeset(web): |
|
497 | 497 | """ |
|
498 | 498 | /changeset[/{revision}] |
|
499 | 499 | ----------------------- |
|
500 | 500 | |
|
501 | 501 | Show information about a single changeset. |
|
502 | 502 | |
|
503 | 503 | A URL path argument is the changeset identifier to show. See ``hg help |
|
504 | 504 | revisions`` for possible values. If not defined, the ``tip`` changeset |
|
505 | 505 | will be shown. |
|
506 | 506 | |
|
507 | 507 | The ``changeset`` template is rendered. Contents of the ``changesettag``, |
|
508 | 508 | ``changesetbookmark``, ``filenodelink``, ``filenolink``, and the many |
|
509 | 509 | templates related to diffs may all be used to produce the output. |
|
510 | 510 | """ |
|
511 | 511 | ctx = webutil.changectx(web.repo, web.req) |
|
512 | 512 | |
|
513 | 513 | return web.sendtemplate(b'changeset', **webutil.changesetentry(web, ctx)) |
|
514 | 514 | |
|
515 | 515 | |
|
516 | 516 | rev = webcommand(b'rev')(changeset) |
|
517 | 517 | |
|
518 | 518 | |
|
519 | 519 | def decodepath(path: bytes) -> bytes: |
|
520 | 520 | """Hook for mapping a path in the repository to a path in the |
|
521 | 521 | working copy. |
|
522 | 522 | |
|
523 | 523 | Extensions (e.g., largefiles) can override this to remap files in |
|
524 | 524 | the virtual file system presented by the manifest command below.""" |
|
525 | 525 | return path |
|
526 | 526 | |
|
527 | 527 | |
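As the docstring above says, ``decodepath`` exists purely as a hook point. One plausible way for an extension to install its own mapping is ``extensions.wrapfunction`` from ``extsetup``; the ``.converted/`` prefix below is an invented example, and the exact registration details should be checked against a real extension such as largefiles:

    # Hypothetical extension sketch: remap repository paths before display,
    # in the spirit of what largefiles does with decodepath().
    from mercurial import extensions
    from mercurial.hgweb import webcommands

    def _decodepath(orig, path):
        prefix = b'.converted/'          # invented prefix, for illustration
        if path.startswith(prefix):
            return path[len(prefix):]
        return orig(path)

    def extsetup(ui):
        extensions.wrapfunction(webcommands, 'decodepath', _decodepath)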
|
528 | 528 | @webcommand(b'manifest') |
|
529 | 529 | def manifest(web): |
|
530 | 530 | """ |
|
531 | 531 | /manifest[/{revision}[/{path}]] |
|
532 | 532 | ------------------------------- |
|
533 | 533 | |
|
534 | 534 | Show information about a directory. |
|
535 | 535 | |
|
536 | 536 | If the URL path arguments are omitted, information about the root |
|
537 | 537 | directory for the ``tip`` changeset will be shown. |
|
538 | 538 | |
|
539 | 539 | Because this handler can only show information for directories, it |
|
540 | 540 | is recommended to use the ``file`` handler instead, as it can handle both |
|
541 | 541 | directories and files. |
|
542 | 542 | |
|
543 | 543 | The ``manifest`` template will be rendered for this handler. |
|
544 | 544 | """ |
|
545 | 545 | if b'node' in web.req.qsparams: |
|
546 | 546 | ctx = webutil.changectx(web.repo, web.req) |
|
547 | 547 | symrev = webutil.symrevorshortnode(web.req, ctx) |
|
548 | 548 | else: |
|
549 | 549 | ctx = web.repo[b'tip'] |
|
550 | 550 | symrev = b'tip' |
|
551 | 551 | path = webutil.cleanpath(web.repo, web.req.qsparams.get(b'file', b'')) |
|
552 | 552 | mf = ctx.manifest() |
|
553 | 553 | node = scmutil.binnode(ctx) |
|
554 | 554 | |
|
555 | 555 | files = {} |
|
556 | 556 | dirs = {} |
|
557 | 557 | parity = paritygen(web.stripecount) |
|
558 | 558 | |
|
559 | 559 | if path and path[-1:] != b"/": |
|
560 | 560 | path += b"/" |
|
561 | 561 | l = len(path) |
|
562 | 562 | abspath = b"/" + path |
|
563 | 563 | |
|
564 | 564 | for full, n in mf.items(): |
|
565 | 565 | # the virtual path (working copy path) used for the full |
|
566 | 566 | # (repository) path |
|
567 | 567 | f = decodepath(full) |
|
568 | 568 | |
|
569 | 569 | if f[:l] != path: |
|
570 | 570 | continue |
|
571 | 571 | remain = f[l:] |
|
572 | 572 | elements = remain.split(b'/') |
|
573 | 573 | if len(elements) == 1: |
|
574 | 574 | files[remain] = full |
|
575 | 575 | else: |
|
576 | 576 | h = dirs # need to retain ref to dirs (root) |
|
577 | 577 | for elem in elements[0:-1]: |
|
578 | 578 | if elem not in h: |
|
579 | 579 | h[elem] = {} |
|
580 | 580 | h = h[elem] |
|
581 | 581 | if len(h) > 1: |
|
582 | 582 | break |
|
583 | 583 | h[None] = None # denotes files present |
|
584 | 584 | |
|
585 | 585 | if mf and not files and not dirs: |
|
586 | 586 | raise ErrorResponse(HTTP_NOT_FOUND, b'path not found: ' + path) |
|
587 | 587 | |
|
588 | 588 | def filelist(context): |
|
589 | 589 | for f in sorted(files): |
|
590 | 590 | full = files[f] |
|
591 | 591 | |
|
592 | 592 | fctx = ctx.filectx(full) |
|
593 | 593 | yield { |
|
594 | 594 | b"file": full, |
|
595 | 595 | b"parity": next(parity), |
|
596 | 596 | b"basename": f, |
|
597 | 597 | b"date": fctx.date(), |
|
598 | 598 | b"size": fctx.size(), |
|
599 | 599 | b"permissions": mf.flags(full), |
|
600 | 600 | } |
|
601 | 601 | |
|
602 | 602 | def dirlist(context): |
|
603 | 603 | for d in sorted(dirs): |
|
604 | 604 | |
|
605 | 605 | emptydirs = [] |
|
606 | 606 | h = dirs[d] |
|
607 | 607 | while isinstance(h, dict) and len(h) == 1: |
|
608 | 608 | k, v = next(iter(h.items())) |
|
609 | 609 | if v: |
|
610 | 610 | emptydirs.append(k) |
|
611 | 611 | h = v |
|
612 | 612 | |
|
613 | 613 | path = b"%s%s" % (abspath, d) |
|
614 | 614 | yield { |
|
615 | 615 | b"parity": next(parity), |
|
616 | 616 | b"path": path, |
|
617 | 617 | # pytype: disable=wrong-arg-types |
|
618 | 618 | b"emptydirs": b"/".join(emptydirs), |
|
619 | 619 | # pytype: enable=wrong-arg-types |
|
620 | 620 | b"basename": d, |
|
621 | 621 | } |
|
622 | 622 | |
|
623 | 623 | return web.sendtemplate( |
|
624 | 624 | b'manifest', |
|
625 | 625 | symrev=symrev, |
|
626 | 626 | path=abspath, |
|
627 | 627 | up=webutil.up(abspath), |
|
628 | 628 | upparity=next(parity), |
|
629 | 629 | fentries=templateutil.mappinggenerator(filelist), |
|
630 | 630 | dentries=templateutil.mappinggenerator(dirlist), |
|
631 | 631 | archives=web.archivelist(hex(node)), |
|
632 | 632 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)) |
|
633 | 633 | ) |
|
634 | 634 | |
|
635 | 635 | |
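The nested ``dirs`` bookkeeping above is the densest part of this handler: each path component becomes a nested dict, and a ``None`` key records that files exist beneath that directory (the ``len(h) > 1`` early exit is only an optimization). A stand-alone sketch of the same idea, with made-up paths:

    # Stand-alone illustration of the files/dirs split performed above.
    def splitmanifest(paths, prefix=b""):
        files, dirs = {}, {}
        l = len(prefix)
        for full in paths:
            if not full.startswith(prefix):
                continue
            elements = full[l:].split(b'/')
            if len(elements) == 1:
                files[elements[0]] = full
            else:
                h = dirs
                for elem in elements[:-1]:
                    h = h.setdefault(elem, {})
                h[None] = None          # marks "files live under here"
        return files, dirs

    f, d = splitmanifest([b"setup.py", b"mercurial/hgweb/webcommands.py"])
    # f == {b"setup.py": b"setup.py"}
    # d == {b"mercurial": {b"hgweb": {None: None}}}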
|
636 | 636 | @webcommand(b'tags') |
|
637 | 637 | def tags(web): |
|
638 | 638 | """ |
|
639 | 639 | /tags |
|
640 | 640 | ----- |
|
641 | 641 | |
|
642 | 642 | Show information about tags. |
|
643 | 643 | |
|
644 | 644 | No arguments are accepted. |
|
645 | 645 | |
|
646 | 646 | The ``tags`` template is rendered. |
|
647 | 647 | """ |
|
648 | 648 | i = list(reversed(web.repo.tagslist())) |
|
649 | 649 | parity = paritygen(web.stripecount) |
|
650 | 650 | |
|
651 | 651 | def entries(context, notip, latestonly): |
|
652 | 652 | t = i |
|
653 | 653 | if notip: |
|
654 | 654 | t = [(k, n) for k, n in i if k != b"tip"] |
|
655 | 655 | if latestonly: |
|
656 | 656 | t = t[:1] |
|
657 | 657 | for k, n in t: |
|
658 | 658 | yield { |
|
659 | 659 | b"parity": next(parity), |
|
660 | 660 | b"tag": k, |
|
661 | 661 | b"date": web.repo[n].date(), |
|
662 | 662 | b"node": hex(n), |
|
663 | 663 | } |
|
664 | 664 | |
|
665 | 665 | return web.sendtemplate( |
|
666 | 666 | b'tags', |
|
667 | 667 | node=hex(web.repo.changelog.tip()), |
|
668 | 668 | entries=templateutil.mappinggenerator(entries, args=(False, False)), |
|
669 | 669 | entriesnotip=templateutil.mappinggenerator(entries, args=(True, False)), |
|
670 | 670 | latestentry=templateutil.mappinggenerator(entries, args=(True, True)), |
|
671 | 671 | ) |
|
672 | 672 | |
|
673 | 673 | |
|
674 | 674 | @webcommand(b'bookmarks') |
|
675 | 675 | def bookmarks(web): |
|
676 | 676 | """ |
|
677 | 677 | /bookmarks |
|
678 | 678 | ---------- |
|
679 | 679 | |
|
680 | 680 | Show information about bookmarks. |
|
681 | 681 | |
|
682 | 682 | No arguments are accepted. |
|
683 | 683 | |
|
684 | 684 | The ``bookmarks`` template is rendered. |
|
685 | 685 | """ |
|
686 | 686 | i = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] |
|
687 | 687 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) |
|
688 | 688 | i = sorted(i, key=sortkey, reverse=True) |
|
689 | 689 | parity = paritygen(web.stripecount) |
|
690 | 690 | |
|
691 | 691 | def entries(context, latestonly): |
|
692 | 692 | t = i |
|
693 | 693 | if latestonly: |
|
694 | 694 | t = i[:1] |
|
695 | 695 | for k, n in t: |
|
696 | 696 | yield { |
|
697 | 697 | b"parity": next(parity), |
|
698 | 698 | b"bookmark": k, |
|
699 | 699 | b"date": web.repo[n].date(), |
|
700 | 700 | b"node": hex(n), |
|
701 | 701 | } |
|
702 | 702 | |
|
703 | 703 | if i: |
|
704 | 704 | latestrev = i[0][1] |
|
705 | 705 | else: |
|
706 | 706 | latestrev = -1 |
|
707 | 707 | lastdate = web.repo[latestrev].date() |
|
708 | 708 | |
|
709 | 709 | return web.sendtemplate( |
|
710 | 710 | b'bookmarks', |
|
711 | 711 | node=hex(web.repo.changelog.tip()), |
|
712 | 712 | lastchange=templateutil.mappinglist([{b'date': lastdate}]), |
|
713 | 713 | entries=templateutil.mappinggenerator(entries, args=(False,)), |
|
714 | 714 | latestentry=templateutil.mappinggenerator(entries, args=(True,)), |
|
715 | 715 | ) |
|
716 | 716 | |
|
717 | 717 | |
|
718 | 718 | @webcommand(b'branches') |
|
719 | 719 | def branches(web): |
|
720 | 720 | """ |
|
721 | 721 | /branches |
|
722 | 722 | --------- |
|
723 | 723 | |
|
724 | 724 | Show information about branches. |
|
725 | 725 | |
|
726 | 726 | All known branches are contained in the output, even closed branches. |
|
727 | 727 | |
|
728 | 728 | No arguments are accepted. |
|
729 | 729 | |
|
730 | 730 | The ``branches`` template is rendered. |
|
731 | 731 | """ |
|
732 | 732 | entries = webutil.branchentries(web.repo, web.stripecount) |
|
733 | 733 | latestentry = webutil.branchentries(web.repo, web.stripecount, 1) |
|
734 | 734 | |
|
735 | 735 | return web.sendtemplate( |
|
736 | 736 | b'branches', |
|
737 | 737 | node=hex(web.repo.changelog.tip()), |
|
738 | 738 | entries=entries, |
|
739 | 739 | latestentry=latestentry, |
|
740 | 740 | ) |
|
741 | 741 | |
|
742 | 742 | |
|
743 | 743 | @webcommand(b'summary') |
|
744 | 744 | def summary(web): |
|
745 | 745 | """ |
|
746 | 746 | /summary |
|
747 | 747 | -------- |
|
748 | 748 | |
|
749 | 749 | Show a summary of repository state. |
|
750 | 750 | |
|
751 | 751 | Information about the latest changesets, bookmarks, tags, and branches |
|
752 | 752 | is captured by this handler. |
|
753 | 753 | |
|
754 | 754 | The ``summary`` template is rendered. |
|
755 | 755 | """ |
|
756 | 756 | i = reversed(web.repo.tagslist()) |
|
757 | 757 | |
|
758 | 758 | def tagentries(context): |
|
759 | 759 | parity = paritygen(web.stripecount) |
|
760 | 760 | count = 0 |
|
761 | 761 | for k, n in i: |
|
762 | 762 | if k == b"tip": # skip tip |
|
763 | 763 | continue |
|
764 | 764 | |
|
765 | 765 | count += 1 |
|
766 | 766 | if count > 10: # limit to 10 tags |
|
767 | 767 | break |
|
768 | 768 | |
|
769 | 769 | yield { |
|
770 | 770 | b'parity': next(parity), |
|
771 | 771 | b'tag': k, |
|
772 | 772 | b'node': hex(n), |
|
773 | 773 | b'date': web.repo[n].date(), |
|
774 | 774 | } |
|
775 | 775 | |
|
776 | 776 | def bookmarks(context): |
|
777 | 777 | parity = paritygen(web.stripecount) |
|
778 | 778 | marks = [b for b in web.repo._bookmarks.items() if b[1] in web.repo] |
|
779 | 779 | sortkey = lambda b: (web.repo[b[1]].rev(), b[0]) |
|
780 | 780 | marks = sorted(marks, key=sortkey, reverse=True) |
|
781 | 781 | for k, n in marks[:10]: # limit to 10 bookmarks |
|
782 | 782 | yield { |
|
783 | 783 | b'parity': next(parity), |
|
784 | 784 | b'bookmark': k, |
|
785 | 785 | b'date': web.repo[n].date(), |
|
786 | 786 | b'node': hex(n), |
|
787 | 787 | } |
|
788 | 788 | |
|
789 | 789 | def changelist(context): |
|
790 | 790 | parity = paritygen(web.stripecount, offset=start - end) |
|
791 | 791 | l = [] # build a list in forward order for efficiency |
|
792 | 792 | revs = [] |
|
793 | 793 | if start < end: |
|
794 | 794 | revs = web.repo.changelog.revs(start, end - 1) |
|
795 | 795 | for i in revs: |
|
796 | 796 | ctx = web.repo[i] |
|
797 | 797 | lm = webutil.commonentry(web.repo, ctx) |
|
798 | 798 | lm[b'parity'] = next(parity) |
|
799 | 799 | l.append(lm) |
|
800 | 800 | |
|
801 | 801 | for entry in reversed(l): |
|
802 | 802 | yield entry |
|
803 | 803 | |
|
804 | 804 | tip = web.repo[b'tip'] |
|
805 | 805 | count = len(web.repo) |
|
806 | 806 | start = max(0, count - web.maxchanges) |
|
807 | 807 | end = min(count, start + web.maxchanges) |
|
808 | 808 | |
|
809 | 809 | desc = web.config(b"web", b"description") |
|
810 | 810 | if not desc: |
|
811 | 811 | desc = b'unknown' |
|
812 | 812 | labels = web.configlist(b'web', b'labels') |
|
813 | 813 | |
|
814 | 814 | return web.sendtemplate( |
|
815 | 815 | b'summary', |
|
816 | 816 | desc=desc, |
|
817 | 817 | owner=get_contact(web.config) or b'unknown', |
|
818 | 818 | lastchange=tip.date(), |
|
819 | 819 | tags=templateutil.mappinggenerator(tagentries, name=b'tagentry'), |
|
820 | 820 | bookmarks=templateutil.mappinggenerator(bookmarks), |
|
821 | 821 | branches=webutil.branchentries(web.repo, web.stripecount, 10), |
|
822 | 822 | shortlog=templateutil.mappinggenerator( |
|
823 | 823 | changelist, name=b'shortlogentry' |
|
824 | 824 | ), |
|
825 | 825 | node=tip.hex(), |
|
826 | 826 | symrev=b'tip', |
|
827 | 827 | archives=web.archivelist(b'tip'), |
|
828 | 828 | labels=templateutil.hybridlist(labels, name=b'label'), |
|
829 | 829 | ) |
|
830 | 830 | |
|
831 | 831 | |
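All of the handlers above draw from ``paritygen(web.stripecount)`` with ``next(parity)``; the real generator is imported from elsewhere in hgweb and also accepts an ``offset`` (used by ``changelist`` above when building the list in reverse), but conceptually it just alternates 0 and 1 in stripes so templates can shade alternating row groups. A rough stand-in, not the actual implementation:

    # Rough stand-in for the row-striping generator used by these handlers.
    def paritygen_sketch(stripecount):
        parity, count = 0, 0
        while True:
            yield parity
            count += 1
            if stripecount and count % stripecount == 0:
                parity = 1 - parity

    gen = paritygen_sketch(2)
    print([next(gen) for _ in range(8)])    # [0, 0, 1, 1, 0, 0, 1, 1]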
|
832 | 832 | @webcommand(b'filediff') |
|
833 | 833 | def filediff(web): |
|
834 | 834 | """ |
|
835 | 835 | /diff/{revision}/{path} |
|
836 | 836 | ----------------------- |
|
837 | 837 | |
|
838 | 838 | Show how a file changed in a particular commit. |
|
839 | 839 | |
|
840 | 840 | The ``filediff`` template is rendered. |
|
841 | 841 | |
|
842 | 842 | This handler is registered under both the ``/diff`` and ``/filediff`` |
|
843 | 843 | paths. ``/diff`` is used in modern code. |
|
844 | 844 | """ |
|
845 | 845 | fctx, ctx = None, None |
|
846 | 846 | try: |
|
847 | 847 | fctx = webutil.filectx(web.repo, web.req) |
|
848 | 848 | except LookupError: |
|
849 | 849 | ctx = webutil.changectx(web.repo, web.req) |
|
850 | 850 | path = webutil.cleanpath(web.repo, web.req.qsparams[b'file']) |
|
851 | 851 | if path not in ctx.files(): |
|
852 | 852 | raise |
|
853 | 853 | |
|
854 | 854 | if fctx is not None: |
|
855 | 855 | path = fctx.path() |
|
856 | 856 | ctx = fctx.changectx() |
|
857 | 857 | basectx = ctx.p1() |
|
858 | 858 | |
|
859 | 859 | style = web.config(b'web', b'style') |
|
860 | 860 | if b'style' in web.req.qsparams: |
|
861 | 861 | style = web.req.qsparams[b'style'] |
|
862 | 862 | |
|
863 | 863 | diffs = webutil.diffs(web, ctx, basectx, [path], style) |
|
864 | 864 | if fctx is not None: |
|
865 | 865 | rename = webutil.renamelink(fctx) |
|
866 | 866 | ctx = fctx |
|
867 | 867 | else: |
|
868 | 868 | rename = templateutil.mappinglist([]) |
|
869 | 869 | ctx = ctx |
|
870 | 870 | |
|
871 | 871 | return web.sendtemplate( |
|
872 | 872 | b'filediff', |
|
873 | 873 | file=path, |
|
874 | 874 | symrev=webutil.symrevorshortnode(web.req, ctx), |
|
875 | 875 | rename=rename, |
|
876 | 876 | diff=diffs, |
|
877 | 877 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)) |
|
878 | 878 | ) |
|
879 | 879 | |
|
880 | 880 | |
|
881 | 881 | diff = webcommand(b'diff')(filediff) |
|
882 | 882 | |
|
883 | 883 | |
|
884 | 884 | @webcommand(b'comparison') |
|
885 | 885 | def comparison(web): |
|
886 | 886 | """ |
|
887 | 887 | /comparison/{revision}/{path} |
|
888 | 888 | ----------------------------- |
|
889 | 889 | |
|
890 | 890 | Show a comparison between the old and new versions of a file from changes |
|
891 | 891 | made on a particular revision. |
|
892 | 892 | |
|
893 | 893 | This is similar to the ``diff`` handler. However, this form features |
|
894 | 894 | a split or side-by-side diff rather than a unified diff. |
|
895 | 895 | |
|
896 | 896 | The ``context`` query string argument can be used to control the lines of |
|
897 | 897 | context in the diff. |
|
898 | 898 | |
|
899 | 899 | The ``filecomparison`` template is rendered. |
|
900 | 900 | """ |
|
901 | 901 | ctx = webutil.changectx(web.repo, web.req) |
|
902 | 902 | if b'file' not in web.req.qsparams: |
|
903 | 903 | raise ErrorResponse(HTTP_NOT_FOUND, b'file not given') |
|
904 | 904 | path = webutil.cleanpath(web.repo, web.req.qsparams[b'file']) |
|
905 | 905 | |
|
906 | 906 | parsecontext = lambda v: v == b'full' and -1 or int(v) |
|
907 | 907 | if b'context' in web.req.qsparams: |
|
908 | 908 | context = parsecontext(web.req.qsparams[b'context']) |
|
909 | 909 | else: |
|
910 | 910 | context = parsecontext(web.config(b'web', b'comparisoncontext')) |
|
911 | 911 | |
|
912 | 912 | def filelines(f): |
|
913 | 913 | if f.isbinary(): |
|
914 | 914 | mt = pycompat.sysbytes( |
|
915 | 915 | mimetypes.guess_type(pycompat.fsdecode(f.path()))[0] |
|
916 | 916 | or r'application/octet-stream' |
|
917 | 917 | ) |
|
918 | 918 | return [_(b'(binary file %s, hash: %s)') % (mt, hex(f.filenode()))] |
|
919 | 919 | return f.data().splitlines() |
|
920 | 920 | |
|
921 | 921 | fctx = None |
|
922 | 922 | parent = ctx.p1() |
|
923 | 923 | leftrev = parent.rev() |
|
924 | 924 | leftnode = parent.node() |
|
925 | 925 | rightrev = ctx.rev() |
|
926 | 926 | rightnode = scmutil.binnode(ctx) |
|
927 | 927 | if path in ctx: |
|
928 | 928 | fctx = ctx[path] |
|
929 | 929 | rightlines = filelines(fctx) |
|
930 | 930 | if path not in parent: |
|
931 | 931 | leftlines = () |
|
932 | 932 | else: |
|
933 | 933 | pfctx = parent[path] |
|
934 | 934 | leftlines = filelines(pfctx) |
|
935 | 935 | else: |
|
936 | 936 | rightlines = () |
|
937 | 937 | pfctx = ctx.p1()[path] |
|
938 | 938 | leftlines = filelines(pfctx) |
|
939 | 939 | |
|
940 | 940 | comparison = webutil.compare(context, leftlines, rightlines) |
|
941 | 941 | if fctx is not None: |
|
942 | 942 | rename = webutil.renamelink(fctx) |
|
943 | 943 | ctx = fctx |
|
944 | 944 | else: |
|
945 | 945 | rename = templateutil.mappinglist([]) |
|
946 | 946 | ctx = ctx |
|
947 | 947 | |
|
948 | 948 | return web.sendtemplate( |
|
949 | 949 | b'filecomparison', |
|
950 | 950 | file=path, |
|
951 | 951 | symrev=webutil.symrevorshortnode(web.req, ctx), |
|
952 | 952 | rename=rename, |
|
953 | 953 | leftrev=leftrev, |
|
954 | 954 | leftnode=hex(leftnode), |
|
955 | 955 | rightrev=rightrev, |
|
956 | 956 | rightnode=hex(rightnode), |
|
957 | 957 | comparison=comparison, |
|
958 | 958 | **pycompat.strkwargs(webutil.commonentry(web.repo, ctx)) |
|
959 | 959 | ) |
|
960 | 960 | |
|
961 | 961 | |
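One readability note on the ``parsecontext`` lambda above: it uses the old ``and``/``or`` conditional trick, so ``b'full'`` maps to ``-1`` (meaning unlimited context) and anything else is parsed as an integer line count. An equivalent, more explicit spelling:

    def parsecontext(v: bytes) -> int:
        # b'full' requests the whole file, signalled downstream as -1;
        # any other value is expected to be a decimal number of context lines.
        return -1 if v == b'full' else int(v)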
|
962 | 962 | @webcommand(b'annotate') |
|
963 | 963 | def annotate(web): |
|
964 | 964 | """ |
|
965 | 965 | /annotate/{revision}/{path} |
|
966 | 966 | --------------------------- |
|
967 | 967 | |
|
968 | 968 | Show changeset information for each line in a file. |
|
969 | 969 | |
|
970 | 970 | The ``ignorews``, ``ignorewsamount``, ``ignorewseol``, and |
|
971 | 971 | ``ignoreblanklines`` query string arguments have the same meaning as |
|
972 | 972 | their ``[annotate]`` config equivalents. It uses the hgrc boolean |
|
973 | 973 | parsing logic to interpret the value, e.g. ``0`` and ``false`` are
|
974 | 974 | false and ``1`` and ``true`` are true. If not defined, the server |
|
975 | 975 | default settings are used. |
|
976 | 976 | |
|
977 | 977 | The ``fileannotate`` template is rendered. |
|
978 | 978 | """ |
|
979 | 979 | fctx = webutil.filectx(web.repo, web.req) |
|
980 | 980 | f = fctx.path() |
|
981 | 981 | parity = paritygen(web.stripecount) |
|
982 | 982 | ishead = fctx.filenode() in fctx.filelog().heads() |
|
983 | 983 | |
|
984 | 984 | # parents() is called once per line and several lines likely belong to |
|
985 | 985 | # the same revision, so it is worth caching.
|
986 | 986 | # TODO there are still redundant operations within basefilectx.parents() |
|
987 | 987 | # and from the fctx.annotate() call itself that could be cached. |
|
988 | 988 | parentscache = {} |
|
989 | 989 | |
|
990 | 990 | def parents(context, f): |
|
991 | 991 | rev = f.rev() |
|
992 | 992 | if rev not in parentscache: |
|
993 | 993 | parentscache[rev] = [] |
|
994 | 994 | for p in f.parents(): |
|
995 | 995 | entry = { |
|
996 | 996 | b'node': p.hex(), |
|
997 | 997 | b'rev': p.rev(), |
|
998 | 998 | } |
|
999 | 999 | parentscache[rev].append(entry) |
|
1000 | 1000 | |
|
1001 | 1001 | for p in parentscache[rev]: |
|
1002 | 1002 | yield p |
|
1003 | 1003 | |
|
1004 | 1004 | def annotate(context): |
|
1005 | 1005 | if fctx.isbinary(): |
|
1006 | 1006 | mt = pycompat.sysbytes( |
|
1007 | 1007 | mimetypes.guess_type(pycompat.fsdecode(fctx.path()))[0] |
|
1008 | 1008 | or r'application/octet-stream' |
|
1009 | 1009 | ) |
|
1010 | 1010 | lines = [ |
|
1011 | 1011 | dagop.annotateline( |
|
1012 | 1012 | fctx=fctx.filectx(fctx.filerev()), |
|
1013 | 1013 | lineno=1, |
|
1014 | 1014 | text=b'(binary:%s)' % mt, |
|
1015 | 1015 | ) |
|
1016 | 1016 | ] |
|
1017 | 1017 | else: |
|
1018 | 1018 | lines = webutil.annotate(web.req, fctx, web.repo.ui) |
|
1019 | 1019 | |
|
1020 | 1020 | previousrev = None |
|
1021 | 1021 | blockparitygen = paritygen(1) |
|
1022 | 1022 | for lineno, aline in enumerate(lines): |
|
1023 | 1023 | f = aline.fctx |
|
1024 | 1024 | rev = f.rev() |
|
1025 | 1025 | if rev != previousrev: |
|
1026 | 1026 | blockhead = True |
|
1027 | 1027 | blockparity = next(blockparitygen) |
|
1028 | 1028 | else: |
|
1029 | 1029 | blockhead = None |
|
1030 | 1030 | previousrev = rev |
|
1031 | 1031 | yield { |
|
1032 | 1032 | b"parity": next(parity), |
|
1033 | 1033 | b"node": f.hex(), |
|
1034 | 1034 | b"rev": rev, |
|
1035 | 1035 | b"author": f.user(), |
|
1036 | 1036 | b"parents": templateutil.mappinggenerator(parents, args=(f,)), |
|
1037 | 1037 | b"desc": f.description(), |
|
1038 | 1038 | b"extra": f.extra(), |
|
1039 | 1039 | b"file": f.path(), |
|
1040 | 1040 | b"blockhead": blockhead, |
|
1041 | 1041 | b"blockparity": blockparity, |
|
1042 | 1042 | b"targetline": aline.lineno, |
|
1043 | 1043 | b"line": aline.text, |
|
1044 | 1044 | b"lineno": lineno + 1, |
|
1045 | 1045 | b"lineid": b"l%d" % (lineno + 1), |
|
1046 | 1046 | b"linenumber": b"% 6d" % (lineno + 1), |
|
1047 | 1047 | b"revdate": f.date(), |
|
1048 | 1048 | } |
|
1049 | 1049 | |
|
1050 | 1050 | diffopts = webutil.difffeatureopts(web.req, web.repo.ui, b'annotate') |
|
1051 | 1051 | diffopts = { |
|
1052 | 1052 | k: getattr(diffopts, pycompat.sysstr(k)) for k in diffopts.defaults |
|
1053 | 1053 | } |
|
1054 | 1054 | |
|
1055 | 1055 | return web.sendtemplate( |
|
1056 | 1056 | b'fileannotate', |
|
1057 | 1057 | file=f, |
|
1058 | 1058 | annotate=templateutil.mappinggenerator(annotate), |
|
1059 | 1059 | path=webutil.up(f), |
|
1060 | 1060 | symrev=webutil.symrevorshortnode(web.req, fctx), |
|
1061 | 1061 | rename=webutil.renamelink(fctx), |
|
1062 | 1062 | permissions=fctx.manifest().flags(f), |
|
1063 | 1063 | ishead=int(ishead), |
|
1064 | 1064 | diffopts=templateutil.hybriddict(diffopts), |
|
1065 | 1065 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)) |
|
1066 | 1066 | ) |
|
1067 | 1067 | |
|
1068 | 1068 | |
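The ``blockhead``/``blockparity`` bookkeeping above groups consecutive annotated lines coming from the same revision, so templates can render them as one visual block. A detached sketch of the same grouping using ``itertools.groupby`` (the sample revisions and lines are invented):

    # Detached sketch of grouping annotate output into per-revision blocks.
    from itertools import groupby

    annotated = [(12, b'import os'), (12, b'import sys'), (15, b'print(1)')]
    blocks = [
        (rev, [text for _, text in grp])
        for rev, grp in groupby(annotated, key=lambda pair: pair[0])
    ]
    for blockparity, (rev, lines) in enumerate(blocks):
        print(rev, blockparity % 2, len(lines))
    # 12 0 2
    # 15 1 1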
|
1069 | 1069 | @webcommand(b'filelog') |
|
1070 | 1070 | def filelog(web): |
|
1071 | 1071 | """ |
|
1072 | 1072 | /filelog/{revision}/{path} |
|
1073 | 1073 | -------------------------- |
|
1074 | 1074 | |
|
1075 | 1075 | Show information about the history of a file in the repository. |
|
1076 | 1076 | |
|
1077 | 1077 | The ``revcount`` query string argument can be defined to control the |
|
1078 | 1078 | maximum number of entries to show. |
|
1079 | 1079 | |
|
1080 | 1080 | The ``filelog`` template will be rendered. |
|
1081 | 1081 | """ |
|
1082 | 1082 | |
|
1083 | 1083 | try: |
|
1084 | 1084 | fctx = webutil.filectx(web.repo, web.req) |
|
1085 | 1085 | f = fctx.path() |
|
1086 | 1086 | fl = fctx.filelog() |
|
1087 | 1087 | except error.LookupError: |
|
1088 | 1088 | f = webutil.cleanpath(web.repo, web.req.qsparams[b'file']) |
|
1089 | 1089 | fl = web.repo.file(f) |
|
1090 | 1090 | numrevs = len(fl) |
|
1091 | 1091 | if not numrevs: # file doesn't exist at all |
|
1092 | 1092 | raise |
|
1093 | 1093 | rev = webutil.changectx(web.repo, web.req).rev() |
|
1094 | 1094 | first = fl.linkrev(0) |
|
1095 | 1095 | if rev < first: # current rev is from before file existed |
|
1096 | 1096 | raise |
|
1097 | 1097 | frev = numrevs - 1 |
|
1098 | 1098 | while fl.linkrev(frev) > rev: |
|
1099 | 1099 | frev -= 1 |
|
1100 | 1100 | fctx = web.repo.filectx(f, fl.linkrev(frev)) |
|
1101 | 1101 | |
|
1102 | 1102 | revcount = web.maxshortchanges |
|
1103 | 1103 | if b'revcount' in web.req.qsparams: |
|
1104 | 1104 | try: |
|
1105 | 1105 | revcount = int(web.req.qsparams.get(b'revcount', revcount)) |
|
1106 | 1106 | revcount = max(revcount, 1) |
|
1107 | 1107 | web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount |
|
1108 | 1108 | except ValueError: |
|
1109 | 1109 | pass |
|
1110 | 1110 | |
|
1111 | 1111 | lrange = webutil.linerange(web.req) |
|
1112 | 1112 | |
|
1113 | 1113 | lessvars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
1114 | 1114 | lessvars[b'revcount'] = max(revcount // 2, 1) |
|
1115 | 1115 | morevars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
1116 | 1116 | morevars[b'revcount'] = revcount * 2 |
|
1117 | 1117 | |
|
1118 | 1118 | patch = b'patch' in web.req.qsparams |
|
1119 | 1119 | if patch: |
|
1120 | 1120 | lessvars[b'patch'] = morevars[b'patch'] = web.req.qsparams[b'patch'] |
|
1121 | 1121 | descend = b'descend' in web.req.qsparams |
|
1122 | 1122 | if descend: |
|
1123 | 1123 | lessvars[b'descend'] = morevars[b'descend'] = web.req.qsparams[ |
|
1124 | 1124 | b'descend' |
|
1125 | 1125 | ] |
|
1126 | 1126 | |
|
1127 | 1127 | count = fctx.filerev() + 1 |
|
1128 | 1128 | start = max(0, count - revcount) # first rev on this page |
|
1129 | 1129 | end = min(count, start + revcount) # last rev on this page |
|
1130 | 1130 | parity = paritygen(web.stripecount, offset=start - end) |
|
1131 | 1131 | |
|
1132 | 1132 | repo = web.repo |
|
1133 | 1133 | filelog = fctx.filelog() |
|
1134 | 1134 | revs = [ |
|
1135 | 1135 | filerev |
|
1136 | 1136 | for filerev in filelog.revs(start, end - 1) |
|
1137 | 1137 | if filelog.linkrev(filerev) in repo |
|
1138 | 1138 | ] |
|
1139 | 1139 | entries = [] |
|
1140 | 1140 | |
|
1141 | 1141 | diffstyle = web.config(b'web', b'style') |
|
1142 | 1142 | if b'style' in web.req.qsparams: |
|
1143 | 1143 | diffstyle = web.req.qsparams[b'style'] |
|
1144 | 1144 | |
|
1145 | 1145 | def diff(fctx, linerange=None): |
|
1146 | 1146 | ctx = fctx.changectx() |
|
1147 | 1147 | basectx = ctx.p1() |
|
1148 | 1148 | path = fctx.path() |
|
1149 | 1149 | return webutil.diffs( |
|
1150 | 1150 | web, |
|
1151 | 1151 | ctx, |
|
1152 | 1152 | basectx, |
|
1153 | 1153 | [path], |
|
1154 | 1154 | diffstyle, |
|
1155 | 1155 | linerange=linerange, |
|
1156 | 1156 | lineidprefix=b'%s-' % ctx.hex()[:12], |
|
1157 | 1157 | ) |
|
1158 | 1158 | |
|
1159 | 1159 | linerange = None |
|
1160 | 1160 | if lrange is not None: |
|
1161 | 1161 | assert lrange is not None # help pytype (!?) |
|
1162 | 1162 | linerange = webutil.formatlinerange(*lrange) |
|
1163 | 1163 | # deactivate numeric nav links when linerange is specified as this |
|
1164 | 1164 | # would require a dedicated "revnav" class
|
1165 | 1165 | nav = templateutil.mappinglist([]) |
|
1166 | 1166 | if descend: |
|
1167 | 1167 | it = dagop.blockdescendants(fctx, *lrange) |
|
1168 | 1168 | else: |
|
1169 | 1169 | it = dagop.blockancestors(fctx, *lrange) |
|
1170 | 1170 | for i, (c, lr) in enumerate(it, 1): |
|
1171 | 1171 | diffs = None |
|
1172 | 1172 | if patch: |
|
1173 | 1173 | diffs = diff(c, linerange=lr) |
|
1174 | 1174 | # follow renames across filtered (not in range) revisions
|
1175 | 1175 | path = c.path() |
|
1176 | 1176 | lm = webutil.commonentry(repo, c) |
|
1177 | 1177 | lm.update( |
|
1178 | 1178 | { |
|
1179 | 1179 | b'parity': next(parity), |
|
1180 | 1180 | b'filerev': c.rev(), |
|
1181 | 1181 | b'file': path, |
|
1182 | 1182 | b'diff': diffs, |
|
1183 | 1183 | b'linerange': webutil.formatlinerange(*lr), |
|
1184 | 1184 | b'rename': templateutil.mappinglist([]), |
|
1185 | 1185 | } |
|
1186 | 1186 | ) |
|
1187 | 1187 | entries.append(lm) |
|
1188 | 1188 | if i == revcount: |
|
1189 | 1189 | break |
|
1190 | 1190 | lessvars[b'linerange'] = webutil.formatlinerange(*lrange) |
|
1191 | 1191 | morevars[b'linerange'] = lessvars[b'linerange'] |
|
1192 | 1192 | else: |
|
1193 | 1193 | for i in revs: |
|
1194 | 1194 | iterfctx = fctx.filectx(i) |
|
1195 | 1195 | diffs = None |
|
1196 | 1196 | if patch: |
|
1197 | 1197 | diffs = diff(iterfctx) |
|
1198 | 1198 | lm = webutil.commonentry(repo, iterfctx) |
|
1199 | 1199 | lm.update( |
|
1200 | 1200 | { |
|
1201 | 1201 | b'parity': next(parity), |
|
1202 | 1202 | b'filerev': i, |
|
1203 | 1203 | b'file': f, |
|
1204 | 1204 | b'diff': diffs, |
|
1205 | 1205 | b'rename': webutil.renamelink(iterfctx), |
|
1206 | 1206 | } |
|
1207 | 1207 | ) |
|
1208 | 1208 | entries.append(lm) |
|
1209 | 1209 | entries.reverse() |
|
1210 | 1210 | revnav = webutil.filerevnav(web.repo, fctx.path()) |
|
1211 | 1211 | nav = revnav.gen(end - 1, revcount, count) |
|
1212 | 1212 | |
|
1213 | 1213 | latestentry = entries[:1] |
|
1214 | 1214 | |
|
1215 | 1215 | return web.sendtemplate( |
|
1216 | 1216 | b'filelog', |
|
1217 | 1217 | file=f, |
|
1218 | 1218 | nav=nav, |
|
1219 | 1219 | symrev=webutil.symrevorshortnode(web.req, fctx), |
|
1220 | 1220 | entries=templateutil.mappinglist(entries), |
|
1221 | 1221 | descend=descend, |
|
1222 | 1222 | patch=patch, |
|
1223 | 1223 | latestentry=templateutil.mappinglist(latestentry), |
|
1224 | 1224 | linerange=linerange, |
|
1225 | 1225 | revcount=revcount, |
|
1226 | 1226 | morevars=morevars, |
|
1227 | 1227 | lessvars=lessvars, |
|
1228 | 1228 | **pycompat.strkwargs(webutil.commonentry(web.repo, fctx)) |
|
1229 | 1229 | ) |
|
1230 | 1230 | |
|
1231 | 1231 | |
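For reference, the toggles consumed above map directly onto query string parameters: ``revcount`` caps the number of entries, ``patch`` inlines a diff per entry, and ``descend`` switches the line-range walk from ancestors to descendants (the line range itself is read by ``webutil.linerange``, whose parameter format is not shown here). A hypothetical URL for such a request, with a placeholder host and file path:

    # Hypothetical client-side sketch: assemble a filelog URL with the
    # parameters this handler reads from web.req.qsparams.
    from urllib.parse import urlencode

    base = 'http://localhost:8000/filelog/tip/mercurial/hgweb/webcommands.py'
    print('%s?%s' % (base, urlencode({'revcount': 20, 'patch': 1, 'descend': 1})))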
|
1232 | 1232 | @webcommand(b'archive') |
|
1233 | 1233 | def archive(web): |
|
1234 | 1234 | """ |
|
1235 | 1235 | /archive/{revision}.{format}[/{path}] |
|
1236 | 1236 | ------------------------------------- |
|
1237 | 1237 | |
|
1238 | 1238 | Obtain an archive of repository content. |
|
1239 | 1239 | |
|
1240 | 1240 | The content and type of the archive are defined by a URL path parameter.
|
1241 | 1241 | ``format`` is the file extension of the archive type to be generated, e.g.
|
1242 | 1242 | ``zip`` or ``tar.bz2``. Not all archive types may be allowed by your |
|
1243 | 1243 | server configuration. |
|
1244 | 1244 | |
|
1245 | 1245 | The optional ``path`` URL parameter controls content to include in the |
|
1246 | 1246 | archive. If omitted, every file in the specified revision is present in the |
|
1247 | 1247 | archive. If included, only the specified file or contents of the specified |
|
1248 | 1248 | directory will be included in the archive. |
|
1249 | 1249 | |
|
1250 | 1250 | No template is used for this handler. Raw, binary content is generated. |
|
1251 | 1251 | """ |
|
1252 | 1252 | |
|
1253 | 1253 | type_ = web.req.qsparams.get(b'type') |
|
1254 | 1254 | allowed = web.configlist(b"web", b"allow-archive") |
|
1255 | 1255 | key = web.req.qsparams[b'node'] |
|
1256 | 1256 | |
|
1257 | 1257 | if type_ not in webutil.archivespecs: |
|
1258 | 1258 | msg = b'Unsupported archive type: %s' % stringutil.pprint(type_) |
|
1259 | 1259 | raise ErrorResponse(HTTP_NOT_FOUND, msg) |
|
1260 | 1260 | |
|
1261 | 1261 | if not ((type_ in allowed or web.configbool(b"web", b"allow" + type_))): |
|
1262 | 1262 | msg = b'Archive type not allowed: %s' % type_ |
|
1263 | 1263 | raise ErrorResponse(HTTP_FORBIDDEN, msg) |
|
1264 | 1264 | |
|
1265 | 1265 | reponame = re.sub(br"\W+", b"-", os.path.basename(web.reponame)) |
|
1266 | 1266 | cnode = web.repo.lookup(key) |
|
1267 | 1267 | arch_version = key |
|
1268 | 1268 | if cnode == key or key == b'tip': |
|
1269 | 1269 | arch_version = short(cnode) |
|
1270 | 1270 | name = b"%s-%s" % (reponame, arch_version) |
|
1271 | 1271 | |
|
1272 | 1272 | ctx = webutil.changectx(web.repo, web.req) |
|
1273 | 1273 | match = scmutil.match(ctx, []) |
|
1274 | 1274 | file = web.req.qsparams.get(b'file') |
|
1275 | 1275 | if file: |
|
1276 | 1276 | pats = [b'path:' + file] |
|
1277 | 1277 | match = scmutil.match(ctx, pats, default=b'path') |
|
1278 | 1278 | if pats: |
|
1279 | 1279 | files = [f for f in ctx.manifest().keys() if match(f)] |
|
1280 | 1280 | if not files: |
|
1281 | 1281 | raise ErrorResponse( |
|
1282 | 1282 | HTTP_NOT_FOUND, b'file(s) not found: %s' % file |
|
1283 | 1283 | ) |
|
1284 | 1284 | |
|
1285 | 1285 | mimetype, artype, extension, encoding = webutil.archivespecs[type_] |
|
1286 | 1286 | |
|
1287 | 1287 | web.res.headers[b'Content-Type'] = mimetype |
|
1288 | 1288 | web.res.headers[b'Content-Disposition'] = b'attachment; filename=%s%s' % ( |
|
1289 | 1289 | name, |
|
1290 | 1290 | extension, |
|
1291 | 1291 | ) |
|
1292 | 1292 | |
|
1293 | 1293 | if encoding: |
|
1294 | 1294 | web.res.headers[b'Content-Encoding'] = encoding |
|
1295 | 1295 | |
|
1296 | 1296 | web.res.setbodywillwrite() |
|
1297 | 1297 | if list(web.res.sendresponse()): |
|
1298 | 1298 | raise error.ProgrammingError( |
|
1299 | 1299 | b'sendresponse() should not emit data if writing later' |
|
1300 | 1300 | ) |
|
1301 | 1301 | |
|
1302 | 1302 | if web.req.method == b'HEAD': |
|
1303 | 1303 | return [] |
|
1304 | 1304 | |
|
1305 | 1305 | bodyfh = web.res.getbodyfile() |
|
1306 | 1306 | |
|
1307 | 1307 | archival.archive( |
|
1308 | 1308 | web.repo, |
|
1309 | 1309 | bodyfh, |
|
1310 | 1310 | cnode, |
|
1311 | 1311 | artype, |
|
1312 | 1312 | prefix=name, |
|
1313 | 1313 | match=match, |
|
1314 | 1314 | subrepos=web.configbool(b"web", b"archivesubrepos"), |
|
1315 | 1315 | ) |
|
1316 | 1316 | |
|
1317 | 1317 | return [] |
|
1318 | 1318 | |
|
1319 | 1319 | |
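Seen from a client, this handler is what backs links of the form ``/archive/{revision}.{format}``; the response carries the ``Content-Type`` and ``Content-Disposition`` headers set above. A minimal download sketch, again with a placeholder server address and output filename:

    # Hypothetical client-side sketch: download a tarball of the tip revision.
    from urllib.request import urlopen

    url = 'http://localhost:8000/archive/tip.tar.gz'    # placeholder host
    with urlopen(url) as resp:
        disposition = resp.headers.get('Content-Disposition', '')
        with open('repo-tip.tar.gz', 'wb') as fh:
            fh.write(resp.read())
    print('saved archive advertised as:', disposition)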
|
1320 | 1320 | @webcommand(b'static') |
|
1321 | 1321 | def static(web): |
|
1322 | 1322 | fname = web.req.qsparams[b'file'] |
|
1323 | 1323 | # a repo owner may set web.static in .hg/hgrc to get any file |
|
1324 | 1324 | # readable by the user running the CGI script |
|
1325 | 1325 | static = web.config(b"web", b"static", untrusted=False) |
|
1326 | 1326 | staticfile(web.templatepath, static, fname, web.res) |
|
1327 | 1327 | return web.res.sendresponse() |
|
1328 | 1328 | |
|
1329 | 1329 | |
|
1330 | 1330 | @webcommand(b'graph') |
|
1331 | 1331 | def graph(web): |
|
1332 | 1332 | """ |
|
1333 | 1333 | /graph[/{revision}] |
|
1334 | 1334 | ------------------- |
|
1335 | 1335 | |
|
1336 | 1336 | Show information about the graphical topology of the repository. |
|
1337 | 1337 | |
|
1338 | 1338 | Information rendered by this handler can be used to create visual |
|
1339 | 1339 | representations of repository topology. |
|
1340 | 1340 | |
|
1341 | 1341 | The ``revision`` URL parameter controls the starting changeset. If it's |
|
1342 | 1342 | absent, the default is ``tip``. |
|
1343 | 1343 | |
|
1344 | 1344 | The ``revcount`` query string argument can define the number of changesets |
|
1345 | 1345 | to show information for. |
|
1346 | 1346 | |
|
1347 | 1347 | The ``graphtop`` query string argument can specify the starting changeset |
|
1348 | 1348 | for producing the ``jsdata`` variable that is used for rendering the graph in
|
1349 | 1349 | JavaScript. By default it has the same value as ``revision``. |
|
1350 | 1350 | |
|
1351 | 1351 | This handler will render the ``graph`` template. |
|
1352 | 1352 | """ |
|
1353 | 1353 | |
|
1354 | 1354 | if b'node' in web.req.qsparams: |
|
1355 | 1355 | ctx = webutil.changectx(web.repo, web.req) |
|
1356 | 1356 | symrev = webutil.symrevorshortnode(web.req, ctx) |
|
1357 | 1357 | else: |
|
1358 | 1358 | ctx = web.repo[b'tip'] |
|
1359 | 1359 | symrev = b'tip' |
|
1360 | 1360 | rev = ctx.rev() |
|
1361 | 1361 | |
|
1362 | 1362 | bg_height = 39 |
|
1363 | 1363 | revcount = web.maxshortchanges |
|
1364 | 1364 | if b'revcount' in web.req.qsparams: |
|
1365 | 1365 | try: |
|
1366 | 1366 | revcount = int(web.req.qsparams.get(b'revcount', revcount)) |
|
1367 | 1367 | revcount = max(revcount, 1) |
|
1368 | 1368 | web.tmpl.defaults[b'sessionvars'][b'revcount'] = revcount |
|
1369 | 1369 | except ValueError: |
|
1370 | 1370 | pass |
|
1371 | 1371 | |
|
1372 | 1372 | lessvars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
1373 | 1373 | lessvars[b'revcount'] = max(revcount // 2, 1) |
|
1374 | 1374 | morevars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
1375 | 1375 | morevars[b'revcount'] = revcount * 2 |
|
1376 | 1376 | |
|
1377 | 1377 | graphtop = web.req.qsparams.get(b'graphtop', ctx.hex()) |
|
1378 | 1378 | graphvars = copy.copy(web.tmpl.defaults[b'sessionvars']) |
|
1379 | 1379 | graphvars[b'graphtop'] = graphtop |
|
1380 | 1380 | |
|
1381 | 1381 | count = len(web.repo) |
|
1382 | 1382 | pos = rev |
|
1383 | 1383 | |
|
1384 | 1384 | uprev = min(max(0, count - 1), rev + revcount) |
|
1385 | 1385 | downrev = max(0, rev - revcount) |
|
1386 | 1386 | changenav = webutil.revnav(web.repo).gen(pos, revcount, count) |
|
1387 | 1387 | |
|
1388 | 1388 | tree = [] |
|
1389 | 1389 | nextentry = [] |
|
1390 | 1390 | lastrev = 0 |
|
1391 | 1391 | if pos != -1: |
|
1392 | 1392 | allrevs = web.repo.changelog.revs(pos, 0) |
|
1393 | 1393 | revs = [] |
|
1394 | 1394 | for i in allrevs: |
|
1395 | 1395 | revs.append(i) |
|
1396 | 1396 | if len(revs) >= revcount + 1: |
|
1397 | 1397 | break |
|
1398 | 1398 | |
|
1399 | 1399 | if len(revs) > revcount: |
|
1400 | 1400 | nextentry = [webutil.commonentry(web.repo, web.repo[revs[-1]])] |
|
1401 | 1401 | revs = revs[:-1] |
|
1402 | 1402 | |
|
1403 | 1403 | lastrev = revs[-1] |
|
1404 | 1404 | |
|
1405 | 1405 | # We have to feed a baseset to dagwalker as it is expecting smartset |
|
1406 | 1406 | # object. This does not have a big impact on hgweb performance itself |
|
1407 | 1407 | # since hgweb graphing code is not itself lazy yet. |
|
1408 | 1408 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) |
|
1409 | 1409 | # As we said one line above... not lazy. |
|
1410 | 1410 | tree = list( |
|
1411 | 1411 | item |
|
1412 | 1412 | for item in graphmod.colored(dag, web.repo) |
|
1413 | 1413 | if item[1] == graphmod.CHANGESET |
|
1414 | 1414 | ) |
|
1415 | 1415 | |
|
1416 | 1416 | def fulltree(): |
|
1417 | 1417 | pos = web.repo[graphtop].rev() |
|
1418 | 1418 | tree = [] |
|
1419 | 1419 | if pos != -1: |
|
1420 | 1420 | revs = web.repo.changelog.revs(pos, lastrev) |
|
1421 | 1421 | dag = graphmod.dagwalker(web.repo, smartset.baseset(revs)) |
|
1422 | 1422 | tree = list( |
|
1423 | 1423 | item |
|
1424 | 1424 | for item in graphmod.colored(dag, web.repo) |
|
1425 | 1425 | if item[1] == graphmod.CHANGESET |
|
1426 | 1426 | ) |
|
1427 | 1427 | return tree |
|
1428 | 1428 | |
|
1429 | 1429 | def jsdata(context): |
|
1430 | 1430 | for (id, type, ctx, vtx, edges) in fulltree(): |
|
1431 | 1431 | yield { |
|
1432 | 1432 | b'node': pycompat.bytestr(ctx), |
|
1433 | 1433 | b'graphnode': webutil.getgraphnode(web.repo, ctx), |
|
1434 | 1434 | b'vertex': vtx, |
|
1435 | 1435 | b'edges': edges, |
|
1436 | 1436 | } |
|
1437 | 1437 | |
|
1438 | 1438 | def nodes(context): |
|
1439 | 1439 | parity = paritygen(web.stripecount) |
|
1440 | 1440 | for row, (id, type, ctx, vtx, edges) in enumerate(tree): |
|
1441 | 1441 | entry = webutil.commonentry(web.repo, ctx) |
|
1442 | 1442 | edgedata = [ |
|
1443 | 1443 | { |
|
1444 | 1444 | b'col': edge[0], |
|
1445 | 1445 | b'nextcol': edge[1], |
|
1446 | 1446 | b'color': (edge[2] - 1) % 6 + 1, |
|
1447 | 1447 | b'width': edge[3], |
|
1448 | 1448 | b'bcolor': edge[4], |
|
1449 | 1449 | } |
|
1450 | 1450 | for edge in edges |
|
1451 | 1451 | ] |
|
1452 | 1452 | |
|
1453 | 1453 | entry.update( |
|
1454 | 1454 | { |
|
1455 | 1455 | b'col': vtx[0], |
|
1456 | 1456 | b'color': (vtx[1] - 1) % 6 + 1, |
|
1457 | 1457 | b'parity': next(parity), |
|
1458 | 1458 | b'edges': templateutil.mappinglist(edgedata), |
|
1459 | 1459 | b'row': row, |
|
1460 | 1460 | b'nextrow': row + 1, |
|
1461 | 1461 | } |
|
1462 | 1462 | ) |
|
1463 | 1463 | |
|
1464 | 1464 | yield entry |
|
1465 | 1465 | |
|
1466 | 1466 | rows = len(tree) |
|
1467 | 1467 | |
|
1468 | 1468 | return web.sendtemplate( |
|
1469 | 1469 | b'graph', |
|
1470 | 1470 | rev=rev, |
|
1471 | 1471 | symrev=symrev, |
|
1472 | 1472 | revcount=revcount, |
|
1473 | 1473 | uprev=uprev, |
|
1474 | 1474 | lessvars=lessvars, |
|
1475 | 1475 | morevars=morevars, |
|
1476 | 1476 | downrev=downrev, |
|
1477 | 1477 | graphvars=graphvars, |
|
1478 | 1478 | rows=rows, |
|
1479 | 1479 | bg_height=bg_height, |
|
1480 | 1480 | changesets=count, |
|
1481 | 1481 | nextentry=templateutil.mappinglist(nextentry), |
|
1482 | 1482 | jsdata=templateutil.mappinggenerator(jsdata), |
|
1483 | 1483 | nodes=templateutil.mappinggenerator(nodes), |
|
1484 | 1484 | node=ctx.hex(), |
|
1485 | 1485 | archives=web.archivelist(b'tip'), |
|
1486 | 1486 | changenav=changenav, |
|
1487 | 1487 | ) |
|
1488 | 1488 | |
|
1489 | 1489 | |
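Each item kept in ``tree`` above is an ``(id, type, ctx, vtx, edges)`` tuple from ``graphmod.colored``, and ``jsdata``/``nodes`` merely reshape it for the templates. Both clamp color indexes into the range 1..6 with ``(value - 1) % 6 + 1``; a tiny detached sketch of that cycling:

    # Detached sketch of the 1..6 color cycling applied to graph rows and edges.
    def graphcolor(value: int) -> int:
        # fold any positive color index into six buckets, starting at 1
        return (value - 1) % 6 + 1

    print([graphcolor(v) for v in range(1, 10)])    # [1, 2, 3, 4, 5, 6, 1, 2, 3]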
|
1490 | 1490 | def _getdoc(e): |
|
1491 | 1491 | doc = e[0].__doc__ |
|
1492 | 1492 | if doc: |
|
1493 | 1493 | doc = _(doc).partition(b'\n')[0] |
|
1494 | 1494 | else: |
|
1495 | 1495 | doc = _(b'(no help text available)') |
|
1496 | 1496 | return doc |
|
1497 | 1497 | |
|
1498 | 1498 | |
|
1499 | 1499 | @webcommand(b'help') |
|
1500 | 1500 | def help(web): |
|
1501 | 1501 | """ |
|
1502 | 1502 | /help[/{topic}] |
|
1503 | 1503 | --------------- |
|
1504 | 1504 | |
|
1505 | 1505 | Render help documentation. |
|
1506 | 1506 | |
|
1507 | 1507 | This web command is roughly equivalent to :hg:`help`. If a ``topic`` |
|
1508 | 1508 | is defined, that help topic will be rendered. If not, an index of |
|
1509 | 1509 | available help topics will be rendered. |
|
1510 | 1510 | |
|
1511 | 1511 | The ``help`` template will be rendered when requesting help for a topic. |
|
1512 | 1512 | ``helptopics`` will be rendered for the index of help topics. |
|
1513 | 1513 | """ |
|
1514 | 1514 | from .. import commands, help as helpmod # avoid cycle |
|
1515 | 1515 | |
|
1516 | 1516 | topicname = web.req.qsparams.get(b'node') |
|
1517 | 1517 | if not topicname: |
|
1518 | 1518 | |
|
1519 | 1519 | def topics(context): |
|
1520 | 1520 | for h in helpmod.helptable: |
|
1521 | 1521 | entries, summary, _doc = h[0:3] |
|
1522 | 1522 | yield {b'topic': entries[0], b'summary': summary} |
|
1523 | 1523 | |
|
1524 | 1524 | early, other = [], [] |
|
1525 | 1525 | primary = lambda s: s.partition(b'|')[0] |
|
1526 | 1526 | for c, e in commands.table.items(): |
|
1527 | 1527 | doc = _getdoc(e) |
|
1528 | 1528 | if b'DEPRECATED' in doc or c.startswith(b'debug'): |
|
1529 | 1529 | continue |
|
1530 | 1530 | cmd = primary(c) |
|
1531 | 1531 | if getattr(e[0], 'helpbasic', False): |
|
1532 | 1532 | early.append((cmd, doc)) |
|
1533 | 1533 | else: |
|
1534 | 1534 | other.append((cmd, doc)) |
|
1535 | 1535 | |
|
1536 | 1536 | early.sort() |
|
1537 | 1537 | other.sort() |
|
1538 | 1538 | |
|
1539 | 1539 | def earlycommands(context): |
|
1540 | 1540 | for c, doc in early: |
|
1541 | 1541 | yield {b'topic': c, b'summary': doc} |
|
1542 | 1542 | |
|
1543 | 1543 | def othercommands(context): |
|
1544 | 1544 | for c, doc in other: |
|
1545 | 1545 | yield {b'topic': c, b'summary': doc} |
|
1546 | 1546 | |
|
1547 | 1547 | return web.sendtemplate( |
|
1548 | 1548 | b'helptopics', |
|
1549 | 1549 | topics=templateutil.mappinggenerator(topics), |
|
1550 | 1550 | earlycommands=templateutil.mappinggenerator(earlycommands), |
|
1551 | 1551 | othercommands=templateutil.mappinggenerator(othercommands), |
|
1552 | 1552 | title=b'Index', |
|
1553 | 1553 | ) |
|
1554 | 1554 | |
|
1555 | 1555 | # Render an index of sub-topics. |
|
1556 | 1556 | if topicname in helpmod.subtopics: |
|
1557 | 1557 | topics = [] |
|
1558 | 1558 | for entries, summary, _doc in helpmod.subtopics[topicname]: |
|
1559 | 1559 | topics.append( |
|
1560 | 1560 | { |
|
1561 | 1561 | b'topic': b'%s.%s' % (topicname, entries[0]), |
|
1562 | 1562 | b'basename': entries[0], |
|
1563 | 1563 | b'summary': summary, |
|
1564 | 1564 | } |
|
1565 | 1565 | ) |
|
1566 | 1566 | |
|
1567 | 1567 | return web.sendtemplate( |
|
1568 | 1568 | b'helptopics', |
|
1569 | 1569 | topics=templateutil.mappinglist(topics), |
|
1570 | 1570 | title=topicname, |
|
1571 | 1571 | subindex=True, |
|
1572 | 1572 | ) |
|
1573 | 1573 | |
|
1574 | 1574 | u = webutil.wsgiui.load() |
|
1575 | 1575 | u.verbose = True |
|
1576 | 1576 | |
|
1577 | 1577 | # Render a page from a sub-topic. |
|
1578 | 1578 | if b'.' in topicname: |
|
1579 | 1579 | # TODO implement support for rendering sections, like |
|
1580 | 1580 | # `hg help` works. |
|
1581 | 1581 | topic, subtopic = topicname.split(b'.', 1) |
|
1582 | 1582 | if topic not in helpmod.subtopics: |
|
1583 | 1583 | raise ErrorResponse(HTTP_NOT_FOUND) |
|
1584 | 1584 | else: |
|
1585 | 1585 | topic = topicname |
|
1586 | 1586 | subtopic = None |
|
1587 | 1587 | |
|
1588 | 1588 | try: |
|
1589 | 1589 | doc = helpmod.help_(u, commands, topic, subtopic=subtopic) |
|
1590 | 1590 | except error.Abort: |
|
1591 | 1591 | raise ErrorResponse(HTTP_NOT_FOUND) |
|
1592 | 1592 | |
|
1593 | 1593 | return web.sendtemplate(b'help', topic=topicname, doc=doc) |
|
1594 | 1594 | |
|
1595 | 1595 | |
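Two small helpers above are easy to skim past: ``_getdoc`` keeps only the first line of a command's docstring, and the ``primary`` lambda strips alias suffixes from command table keys. For illustration, with an invented docstring:

    # Illustration of the alias-stripping and first-docstring-line helpers.
    primary = lambda s: s.partition(b'|')[0]
    print(primary(b'commit|ci'))            # b'commit'

    doc = b'commit the specified files\n\n    detailed help follows...'
    print(doc.partition(b'\n')[0])          # b'commit the specified files'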
|
1596 | 1596 | # tell hggettext to extract docstrings from these functions: |
|
1597 | 1597 | i18nfunctions = commands.values() |
@@ -1,135 +1,135 b'' | |||
|
1 | 1 | import _lsprof |
|
2 | 2 | import sys |
|
3 | 3 | |
|
4 | 4 | Profiler = _lsprof.Profiler |
|
5 | 5 | |
|
6 | 6 | # PyPy doesn't expose profiler_entry from the module. |
|
7 | 7 | profiler_entry = getattr(_lsprof, 'profiler_entry', None) |
|
8 | 8 | |
|
9 |   | __all__ = [

  | 9 | __all__ = ['profile', 'Stats']
|
10 | 10 | |
|
11 | 11 | |
|
12 | 12 | def profile(f, *args, **kwds): |
|
13 | 13 | """XXX docstring""" |
|
14 | 14 | p = Profiler() |
|
15 | 15 | p.enable(subcalls=True, builtins=True) |
|
16 | 16 | try: |
|
17 | 17 | f(*args, **kwds) |
|
18 | 18 | finally: |
|
19 | 19 | p.disable() |
|
20 | 20 | return Stats(p.getstats()) |
|
21 | 21 | |
|
22 | 22 | |
|
23 | 23 | class Stats: |
|
24 | 24 | """XXX docstring""" |
|
25 | 25 | |
|
26 | 26 | def __init__(self, data): |
|
27 | 27 | self.data = data |
|
28 | 28 | |
|
29 | 29 | def sort(self, crit="inlinetime"): |
|
30 | 30 | """XXX docstring""" |
|
31 | 31 | # profiler_entries isn't defined when running under PyPy. |
|
32 | 32 | if profiler_entry: |
|
33 | 33 | if crit not in profiler_entry.__dict__: |
|
34 | 34 | raise ValueError(b"Can't sort by %s" % crit) |
|
35 | 35 | elif self.data and not getattr(self.data[0], crit, None): |
|
36 | 36 | raise ValueError(b"Can't sort by %s" % crit) |
|
37 | 37 | |
|
38 | 38 | self.data.sort(key=lambda x: getattr(x, crit), reverse=True) |
|
39 | 39 | for e in self.data: |
|
40 | 40 | if e.calls: |
|
41 | 41 | e.calls.sort(key=lambda x: getattr(x, crit), reverse=True) |
|
42 | 42 | |
|
43 | 43 | def pprint(self, top=None, file=None, limit=None, climit=None): |
|
44 | 44 | """XXX docstring""" |
|
45 | 45 | if file is None: |
|
46 | 46 | file = sys.stdout |
|
47 | 47 | d = self.data |
|
48 | 48 | if top is not None: |
|
49 | 49 | d = d[:top] |
|
50 | 50 | cols = b"% 12d %12d %11.4f %11.4f %s\n" |
|
51 | 51 | hcols = b"% 12s %12s %12s %12s %s\n" |
|
52 | 52 | file.write( |
|
53 | 53 | hcols |
|
54 | 54 | % ( |
|
55 | 55 | b"CallCount", |
|
56 | 56 | b"Recursive", |
|
57 | 57 | b"Total(s)", |
|
58 | 58 | b"Inline(s)", |
|
59 | 59 | b"module:lineno(function)", |
|
60 | 60 | ) |
|
61 | 61 | ) |
|
62 | 62 | count = 0 |
|
63 | 63 | for e in d: |
|
64 | 64 | file.write( |
|
65 | 65 | cols |
|
66 | 66 | % ( |
|
67 | 67 | e.callcount, |
|
68 | 68 | e.reccallcount, |
|
69 | 69 | e.totaltime, |
|
70 | 70 | e.inlinetime, |
|
71 | 71 | label(e.code), |
|
72 | 72 | ) |
|
73 | 73 | ) |
|
74 | 74 | count += 1 |
|
75 | 75 | if limit is not None and count == limit: |
|
76 | 76 | return |
|
77 | 77 | ccount = 0 |
|
78 | 78 | if climit and e.calls: |
|
79 | 79 | for se in e.calls: |
|
80 | 80 | file.write( |
|
81 | 81 | cols |
|
82 | 82 | % ( |
|
83 | 83 | se.callcount, |
|
84 | 84 | se.reccallcount, |
|
85 | 85 | se.totaltime, |
|
86 | 86 | se.inlinetime, |
|
87 | 87 | b" %s" % label(se.code), |
|
88 | 88 | ) |
|
89 | 89 | ) |
|
90 | 90 | count += 1 |
|
91 | 91 | ccount += 1 |
|
92 | 92 | if limit is not None and count == limit: |
|
93 | 93 | return |
|
94 | 94 | if climit is not None and ccount == climit: |
|
95 | 95 | break |
|
96 | 96 | |
|
97 | 97 | def freeze(self): |
|
98 | 98 | """Replace all references to code objects with string |
|
99 | 99 | descriptions; this makes it possible to pickle the instance.""" |
|
100 | 100 | |
|
101 | 101 | # this code is probably rather ickier than it needs to be! |
|
102 | 102 | for i in range(len(self.data)): |
|
103 | 103 | e = self.data[i] |
|
104 | 104 | if not isinstance(e.code, str): |
|
105 | 105 | self.data[i] = type(e)((label(e.code),) + e[1:]) |
|
106 | 106 | if e.calls: |
|
107 | 107 | for j in range(len(e.calls)): |
|
108 | 108 | se = e.calls[j] |
|
109 | 109 | if not isinstance(se.code, str): |
|
110 | 110 | e.calls[j] = type(se)((label(se.code),) + se[1:]) |
|
111 | 111 | |
|
112 | 112 | |
|
113 | 113 | _fn2mod = {} |
|
114 | 114 | |
|
115 | 115 | |
|
116 | 116 | def label(code): |
|
117 | 117 | if isinstance(code, str): |
|
118 | 118 | return code.encode('latin-1') |
|
119 | 119 | try: |
|
120 | 120 | mname = _fn2mod[code.co_filename] |
|
121 | 121 | except KeyError: |
|
122 | 122 | for k, v in list(sys.modules.items()): |
|
123 | 123 | if v is None: |
|
124 | 124 | continue |
|
125 | 125 | if not isinstance(getattr(v, '__file__', None), str): |
|
126 | 126 | continue |
|
127 | 127 | if v.__file__.startswith(code.co_filename): |
|
128 | 128 | mname = _fn2mod[code.co_filename] = k |
|
129 | 129 | break |
|
130 | 130 | else: |
|
131 | 131 | mname = _fn2mod[code.co_filename] = '<%s>' % code.co_filename |
|
132 | 132 | |
|
133 | 133 | res = '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name) |
|
134 | 134 | |
|
135 | 135 | return res.encode('latin-1') |
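
For context on the table produced by pprint() above, here is a minimal, self-contained sketch of feeding it from the standard cProfile machinery; busywork() and the top-5 cutoff are purely illustrative, and the wrapper class itself is deliberately not used so the snippet stays runnable on its own:

import cProfile

def busywork():
    return sum(i * i for i in range(100000))

p = cProfile.Profile()
p.enable()
busywork()
p.disable()

# Profile.getstats() yields _lsprof.profiler_entry objects carrying the same
# callcount/reccallcount/totaltime/inlinetime/code fields printed above.
entries = sorted(p.getstats(), key=lambda e: e.inlinetime, reverse=True)
print('%12s %12s %12s %12s %s'
      % ('CallCount', 'Recursive', 'Total(s)', 'Inline(s)', 'module:lineno(function)'))
for e in entries[:5]:
    print('%12d %12d %11.4f %11.4f %s'
          % (e.callcount, e.reccallcount, e.totaltime, e.inlinetime, e.code))
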
@@ -1,1097 +1,1097 b'' | |||
|
1 | 1 | ## statprof.py |
|
2 | 2 | ## Copyright (C) 2012 Bryan O'Sullivan <bos@serpentine.com> |
|
3 | 3 | ## Copyright (C) 2011 Alex Fraser <alex at phatcore dot com> |
|
4 | 4 | ## Copyright (C) 2004,2005 Andy Wingo <wingo at pobox dot com> |
|
5 | 5 | ## Copyright (C) 2001 Rob Browning <rlb at defaultvalue dot org> |
|
6 | 6 | |
|
7 | 7 | ## This library is free software; you can redistribute it and/or |
|
8 | 8 | ## modify it under the terms of the GNU Lesser General Public |
|
9 | 9 | ## License as published by the Free Software Foundation; either |
|
10 | 10 | ## version 2.1 of the License, or (at your option) any later version. |
|
11 | 11 | ## |
|
12 | 12 | ## This library is distributed in the hope that it will be useful, |
|
13 | 13 | ## but WITHOUT ANY WARRANTY; without even the implied warranty of |
|
14 | 14 | ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
|
15 | 15 | ## Lesser General Public License for more details. |
|
16 | 16 | ## |
|
17 | 17 | ## You should have received a copy of the GNU Lesser General Public |
|
18 | 18 | ## License along with this program; if not, contact: |
|
19 | 19 | ## |
|
20 | 20 | ## Free Software Foundation Voice: +1-617-542-5942 |
|
21 | 21 | ## 59 Temple Place - Suite 330 Fax: +1-617-542-2652 |
|
22 | 22 | ## Boston, MA 02111-1307, USA gnu@gnu.org |
|
23 | 23 | |
|
24 | 24 | """ |
|
25 | 25 | statprof is intended to be a fairly simple statistical profiler for |
|
26 | 26 | python. It was ported directly from a statistical profiler for guile, |
|
27 | 27 | also named statprof, available from guile-lib [0]. |
|
28 | 28 | |
|
29 | 29 | [0] http://wingolog.org/software/guile-lib/statprof/ |
|
30 | 30 | |
|
31 | 31 | To start profiling, call statprof.start(): |
|
32 | 32 | >>> start() |
|
33 | 33 | |
|
34 | 34 | Then run whatever it is that you want to profile, for example: |
|
35 | 35 | >>> import test.pystone; test.pystone.pystones() |
|
36 | 36 | |
|
37 | 37 | Then stop the profiling and print out the results: |
|
38 | 38 | >>> stop() |
|
39 | 39 | >>> display() |
|
40 | 40 | % cumulative self |
|
41 | 41 | time seconds seconds name |
|
42 | 42 | 26.72 1.40 0.37 pystone.py:79:Proc0 |
|
43 | 43 | 13.79 0.56 0.19 pystone.py:133:Proc1 |
|
44 | 44 | 13.79 0.19 0.19 pystone.py:208:Proc8 |
|
45 | 45 | 10.34 0.16 0.14 pystone.py:229:Func2 |
|
46 | 46 | 6.90 0.10 0.10 pystone.py:45:__init__ |
|
47 | 47 | 4.31 0.16 0.06 pystone.py:53:copy |
|
48 | 48 | ... |
|
49 | 49 | |
|
50 | 50 | All of the numerical data is statistically approximate. In the |
|
51 | 51 | following column descriptions, and in all of statprof, "time" refers |
|
52 | 52 | to execution time (both user and system), not wall clock time. |
|
53 | 53 | |
|
54 | 54 | % time |
|
55 | 55 | The percent of the time spent inside the procedure itself (not |
|
56 | 56 | counting children). |
|
57 | 57 | |
|
58 | 58 | cumulative seconds |
|
59 | 59 | The total number of seconds spent in the procedure, including |
|
60 | 60 | children. |
|
61 | 61 | |
|
62 | 62 | self seconds |
|
63 | 63 | The total number of seconds spent in the procedure itself (not |
|
64 | 64 | counting children). |
|
65 | 65 | |
|
66 | 66 | name |
|
67 | 67 | The name of the procedure. |
|
68 | 68 | |
|
69 | 69 | By default statprof keeps the data collected from previous runs. If you |
|
70 | 70 | want to clear the collected data, call reset(): |
|
71 | 71 | >>> reset() |
|
72 | 72 | |
|
73 | 73 | reset() can also be used to change the sampling frequency from the |
|
74 | 74 | default of 1000 Hz. For example, to tell statprof to sample 50 times a |
|
75 | 75 | second: |
|
76 | 76 | >>> reset(50) |
|
77 | 77 | |
|
78 | 78 | This means that statprof will sample the call stack after every 1/50 of |
|
79 | 79 | a second of user + system time spent running on behalf of the python |
|
80 | 80 | process. When your process is idle (for example, blocking in a read(), |
|
81 | 81 | as is the case at the listener), the clock does not advance. For this |
|
82 | 82 | reason statprof is not currently suitable for profiling io-bound
|
83 | 83 | operations. |
|
84 | 84 | |
|
85 | 85 | The profiler uses the hash of the code object itself to identify the |
|
86 | 86 | procedures, so it won't confuse different procedures with the same name. |
|
87 | 87 | They will show up as two different rows in the output. |
|
88 | 88 | |
|
89 | 89 | Right now the profiler is quite simplistic. I cannot provide |
|
90 | 90 | call-graphs or other higher level information. What you see in the |
|
91 | 91 | table is pretty much all there is. Patches are welcome :-) |
|
92 | 92 | |
|
93 | 93 | |
|
94 | 94 | Threading |
|
95 | 95 | --------- |
|
96 | 96 | |
|
97 | 97 | Because signals only get delivered to the main thread in Python, |
|
98 | 98 | statprof only profiles the main thread. However because the time |
|
99 | 99 | reporting function uses per-process timers, the results can be |
|
100 | 100 | significantly off if other threads' work patterns are not similar to the |
|
101 | 101 | main thread's work patterns. |
|
102 | 102 | """ |
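
Beyond the start()/stop() calls shown above, the module also exposes a profile() context manager and honors a STATPROF_DEST environment variable when stopping. A small sketch, assuming the module is importable as mercurial.statprof:

from mercurial import statprof

# Sample at 100 Hz instead of the default 1000 Hz.
statprof.reset(frequency=100)

# profile() wraps start()/stop() and prints the report on exit.
with statprof.profile():
    total = sum(i * i for i in range(1000000))

# Alternatively, export the raw samples for later analysis: if STATPROF_DEST
# is set when stop() runs, the samples are written to that path and can be
# reloaded later with statprof.load_data(path).
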
|
103 | 103 | # no-check-code |
|
104 | 104 | |
|
105 | 105 | import collections |
|
106 | 106 | import contextlib |
|
107 | 107 | import getopt |
|
108 | 108 | import inspect |
|
109 | 109 | import json |
|
110 | 110 | import os |
|
111 | 111 | import signal |
|
112 | 112 | import sys |
|
113 | 113 | import threading |
|
114 | 114 | import time |
|
115 | 115 | |
|
116 | 116 | from .pycompat import open |
|
117 | 117 | from . import ( |
|
118 | 118 | encoding, |
|
119 | 119 | pycompat, |
|
120 | 120 | ) |
|
121 | 121 | |
|
122 | 122 | defaultdict = collections.defaultdict |
|
123 | 123 | contextmanager = contextlib.contextmanager |
|
124 | 124 | |
|
125 |     | __all__ = [

    | 125 | __all__ = ['start', 'stop', 'reset', 'display', 'profile']
|
126 | 126 | |
|
127 | 127 | skips = { |
|
128 | 128 | "util.py:check", |
|
129 | 129 | "extensions.py:closure", |
|
130 | 130 | "color.py:colorcmd", |
|
131 | 131 | "dispatch.py:checkargs", |
|
132 | 132 | "dispatch.py:<lambda>", |
|
133 | 133 | "dispatch.py:_runcatch", |
|
134 | 134 | "dispatch.py:_dispatch", |
|
135 | 135 | "dispatch.py:_runcommand", |
|
136 | 136 | "pager.py:pagecmd", |
|
137 | 137 | "dispatch.py:run", |
|
138 | 138 | "dispatch.py:dispatch", |
|
139 | 139 | "dispatch.py:runcommand", |
|
140 | 140 | "hg.py:<module>", |
|
141 | 141 | "evolve.py:warnobserrors", |
|
142 | 142 | } |
|
143 | 143 | |
|
144 | 144 | ########################################################################### |
|
145 | 145 | ## Utils |
|
146 | 146 | |
|
147 | 147 | |
|
148 | 148 | def clock(): |
|
149 | 149 | times = os.times() |
|
150 | 150 | return (times[0] + times[1], times[4]) |
|
151 | 151 | |
|
152 | 152 | |
|
153 | 153 | ########################################################################### |
|
154 | 154 | ## Collection data structures |
|
155 | 155 | |
|
156 | 156 | |
|
157 | 157 | class ProfileState: |
|
158 | 158 | def __init__(self, frequency=None): |
|
159 | 159 | self.reset(frequency) |
|
160 | 160 | self.track = b'cpu' |
|
161 | 161 | |
|
162 | 162 | def reset(self, frequency=None): |
|
163 | 163 | # total so far |
|
164 | 164 | self.accumulated_time = (0.0, 0.0) |
|
165 | 165 | # start_time when timer is active |
|
166 | 166 | self.last_start_time = None |
|
167 | 167 | # a float |
|
168 | 168 | if frequency: |
|
169 | 169 | self.sample_interval = 1.0 / frequency |
|
170 | 170 | elif not hasattr(self, 'sample_interval'): |
|
171 | 171 | # default to 1000 Hz |
|
172 | 172 | self.sample_interval = 1.0 / 1000.0 |
|
173 | 173 | else: |
|
174 | 174 | # leave the frequency as it was |
|
175 | 175 | pass |
|
176 | 176 | self.remaining_prof_time = None |
|
177 | 177 | # for user start/stop nesting |
|
178 | 178 | self.profile_level = 0 |
|
179 | 179 | |
|
180 | 180 | self.samples = [] |
|
181 | 181 | |
|
182 | 182 | def accumulate_time(self, stop_time): |
|
183 | 183 | increment = ( |
|
184 | 184 | stop_time[0] - self.last_start_time[0], |
|
185 | 185 | stop_time[1] - self.last_start_time[1], |
|
186 | 186 | ) |
|
187 | 187 | self.accumulated_time = ( |
|
188 | 188 | self.accumulated_time[0] + increment[0], |
|
189 | 189 | self.accumulated_time[1] + increment[1], |
|
190 | 190 | ) |
|
191 | 191 | |
|
192 | 192 | def seconds_per_sample(self): |
|
193 | 193 | return self.accumulated_time[self.timeidx] / len(self.samples) |
|
194 | 194 | |
|
195 | 195 | @property |
|
196 | 196 | def timeidx(self): |
|
197 | 197 | if self.track == b'real': |
|
198 | 198 | return 1 |
|
199 | 199 | return 0 |
|
200 | 200 | |
|
201 | 201 | |
|
202 | 202 | state = ProfileState() |
|
203 | 203 | |
|
204 | 204 | |
|
205 | 205 | class CodeSite: |
|
206 | 206 | cache = {} |
|
207 | 207 | |
|
208 | 208 | __slots__ = ('path', 'lineno', 'function', 'source') |
|
209 | 209 | |
|
210 | 210 | def __init__(self, path, lineno, function): |
|
211 | 211 | assert isinstance(path, bytes) |
|
212 | 212 | self.path = path |
|
213 | 213 | self.lineno = lineno |
|
214 | 214 | assert isinstance(function, bytes) |
|
215 | 215 | self.function = function |
|
216 | 216 | self.source = None |
|
217 | 217 | |
|
218 | 218 | def __eq__(self, other): |
|
219 | 219 | try: |
|
220 | 220 | return self.lineno == other.lineno and self.path == other.path |
|
221 | 221 | except: |
|
222 | 222 | return False |
|
223 | 223 | |
|
224 | 224 | def __hash__(self): |
|
225 | 225 | return hash((self.lineno, self.path)) |
|
226 | 226 | |
|
227 | 227 | @classmethod |
|
228 | 228 | def get(cls, path, lineno, function): |
|
229 | 229 | k = (path, lineno) |
|
230 | 230 | try: |
|
231 | 231 | return cls.cache[k] |
|
232 | 232 | except KeyError: |
|
233 | 233 | v = cls(path, lineno, function) |
|
234 | 234 | cls.cache[k] = v |
|
235 | 235 | return v |
|
236 | 236 | |
|
237 | 237 | def getsource(self, length): |
|
238 | 238 | if self.source is None: |
|
239 | 239 | try: |
|
240 | 240 | lineno = self.lineno - 1 # lineno can be None |
|
241 | 241 | with open(self.path, b'rb') as fp: |
|
242 | 242 | for i, line in enumerate(fp): |
|
243 | 243 | if i == lineno: |
|
244 | 244 | self.source = line.strip() |
|
245 | 245 | break |
|
246 | 246 | except: |
|
247 | 247 | pass |
|
248 | 248 | if self.source is None: |
|
249 | 249 | self.source = b'' |
|
250 | 250 | |
|
251 | 251 | source = self.source |
|
252 | 252 | if len(source) > length: |
|
253 | 253 | source = source[: (length - 3)] + b"..." |
|
254 | 254 | return source |
|
255 | 255 | |
|
256 | 256 | def filename(self): |
|
257 | 257 | return os.path.basename(self.path) |
|
258 | 258 | |
|
259 | 259 | def skipname(self): |
|
260 | 260 | return '%s:%s' % (self.filename(), self.function) |
|
261 | 261 | |
|
262 | 262 | |
|
263 | 263 | class Sample: |
|
264 | 264 | __slots__ = ('stack', 'time') |
|
265 | 265 | |
|
266 | 266 | def __init__(self, stack, time): |
|
267 | 267 | self.stack = stack |
|
268 | 268 | self.time = time |
|
269 | 269 | |
|
270 | 270 | @classmethod |
|
271 | 271 | def from_frame(cls, frame, time): |
|
272 | 272 | stack = [] |
|
273 | 273 | |
|
274 | 274 | while frame: |
|
275 | 275 | stack.append( |
|
276 | 276 | CodeSite.get( |
|
277 | 277 | pycompat.sysbytes(frame.f_code.co_filename), |
|
278 | 278 | frame.f_lineno, |
|
279 | 279 | pycompat.sysbytes(frame.f_code.co_name), |
|
280 | 280 | ) |
|
281 | 281 | ) |
|
282 | 282 | frame = frame.f_back |
|
283 | 283 | |
|
284 | 284 | return Sample(stack, time) |
|
285 | 285 | |
|
286 | 286 | |
|
287 | 287 | ########################################################################### |
|
288 | 288 | ## SIGPROF handler |
|
289 | 289 | |
|
290 | 290 | |
|
291 | 291 | def profile_signal_handler(signum, frame): |
|
292 | 292 | if state.profile_level > 0: |
|
293 | 293 | now = clock() |
|
294 | 294 | state.accumulate_time(now) |
|
295 | 295 | |
|
296 | 296 | timestamp = state.accumulated_time[state.timeidx] |
|
297 | 297 | state.samples.append(Sample.from_frame(frame, timestamp)) |
|
298 | 298 | |
|
299 | 299 | signal.setitimer(signal.ITIMER_PROF, state.sample_interval, 0.0) |
|
300 | 300 | state.last_start_time = now |
|
301 | 301 | |
|
302 | 302 | |
|
303 | 303 | stopthread = threading.Event() |
|
304 | 304 | |
|
305 | 305 | |
|
306 | 306 | def samplerthread(tid): |
|
307 | 307 | while not stopthread.is_set(): |
|
308 | 308 | now = clock() |
|
309 | 309 | state.accumulate_time(now) |
|
310 | 310 | |
|
311 | 311 | frame = sys._current_frames()[tid] |
|
312 | 312 | |
|
313 | 313 | timestamp = state.accumulated_time[state.timeidx] |
|
314 | 314 | state.samples.append(Sample.from_frame(frame, timestamp)) |
|
315 | 315 | |
|
316 | 316 | state.last_start_time = now |
|
317 | 317 | time.sleep(state.sample_interval) |
|
318 | 318 | |
|
319 | 319 | stopthread.clear() |
|
320 | 320 | |
|
321 | 321 | |
|
322 | 322 | ########################################################################### |
|
323 | 323 | ## Profiling API |
|
324 | 324 | |
|
325 | 325 | |
|
326 | 326 | def is_active(): |
|
327 | 327 | return state.profile_level > 0 |
|
328 | 328 | |
|
329 | 329 | |
|
330 | 330 | lastmechanism = None |
|
331 | 331 | |
|
332 | 332 | |
|
333 | 333 | def start(mechanism=b'thread', track=b'cpu'): |
|
334 | 334 | '''Install the profiling signal handler, and start profiling.''' |
|
335 | 335 | state.track = track # note: nesting different mode won't work |
|
336 | 336 | state.profile_level += 1 |
|
337 | 337 | if state.profile_level == 1: |
|
338 | 338 | state.last_start_time = clock() |
|
339 | 339 | rpt = state.remaining_prof_time |
|
340 | 340 | state.remaining_prof_time = None |
|
341 | 341 | |
|
342 | 342 | global lastmechanism |
|
343 | 343 | lastmechanism = mechanism |
|
344 | 344 | |
|
345 | 345 | if mechanism == b'signal': |
|
346 | 346 | signal.signal(signal.SIGPROF, profile_signal_handler) |
|
347 | 347 | signal.setitimer( |
|
348 | 348 | signal.ITIMER_PROF, rpt or state.sample_interval, 0.0 |
|
349 | 349 | ) |
|
350 | 350 | elif mechanism == b'thread': |
|
351 | 351 | frame = inspect.currentframe() |
|
352 | 352 | tid = [k for k, f in sys._current_frames().items() if f == frame][0] |
|
353 | 353 | state.thread = threading.Thread( |
|
354 | 354 | target=samplerthread, args=(tid,), name="samplerthread" |
|
355 | 355 | ) |
|
356 | 356 | state.thread.start() |
|
357 | 357 | |
|
358 | 358 | |
|
359 | 359 | def stop(): |
|
360 | 360 | '''Stop profiling, and uninstall the profiling signal handler.''' |
|
361 | 361 | state.profile_level -= 1 |
|
362 | 362 | if state.profile_level == 0: |
|
363 | 363 | if lastmechanism == b'signal': |
|
364 | 364 | rpt = signal.setitimer(signal.ITIMER_PROF, 0.0, 0.0) |
|
365 | 365 | signal.signal(signal.SIGPROF, signal.SIG_IGN) |
|
366 | 366 | state.remaining_prof_time = rpt[0] |
|
367 | 367 | elif lastmechanism == b'thread': |
|
368 | 368 | stopthread.set() |
|
369 | 369 | state.thread.join() |
|
370 | 370 | |
|
371 | 371 | state.accumulate_time(clock()) |
|
372 | 372 | state.last_start_time = None |
|
373 | 373 | statprofpath = encoding.environ.get(b'STATPROF_DEST') |
|
374 | 374 | if statprofpath: |
|
375 | 375 | save_data(statprofpath) |
|
376 | 376 | |
|
377 | 377 | return state |
|
378 | 378 | |
|
379 | 379 | |
|
380 | 380 | def save_data(path): |
|
381 | 381 | with open(path, b'w+') as file: |
|
382 | 382 | file.write(b"%f %f\n" % state.accumulated_time) |
|
383 | 383 | for sample in state.samples: |
|
384 | 384 | time = sample.time |
|
385 | 385 | stack = sample.stack |
|
386 | 386 | sites = [ |
|
387 | 387 | b'\1'.join([s.path, b'%d' % (s.lineno or -1), s.function])
|
388 | 388 | for s in stack |
|
389 | 389 | ] |
|
390 | 390 | file.write(b"%d\0%s\n" % (time, b'\0'.join(sites))) |
|
391 | 391 | |
|
392 | 392 | |
|
393 | 393 | def load_data(path): |
|
394 | 394 | lines = open(path, b'rb').read().splitlines() |
|
395 | 395 | |
|
396 | 396 | state.accumulated_time = [float(value) for value in lines[0].split()] |
|
397 | 397 | state.samples = [] |
|
398 | 398 | for line in lines[1:]: |
|
399 | 399 | parts = line.split(b'\0') |
|
400 | 400 | time = float(parts[0]) |
|
401 | 401 | rawsites = parts[1:] |
|
402 | 402 | sites = [] |
|
403 | 403 | for rawsite in rawsites: |
|
404 | 404 | siteparts = rawsite.split(b'\1') |
|
405 | 405 | sites.append( |
|
406 | 406 | CodeSite.get(siteparts[0], int(siteparts[1]), siteparts[2]) |
|
407 | 407 | ) |
|
408 | 408 | |
|
409 | 409 | state.samples.append(Sample(sites, time)) |
|
410 | 410 | |
|
411 | 411 | |
|
412 | 412 | def reset(frequency=None): |
|
413 | 413 | """Clear out the state of the profiler. Do not call while the |
|
414 | 414 | profiler is running. |
|
415 | 415 | |
|
416 | 416 | The optional frequency argument specifies the number of samples to |
|
417 | 417 | collect per second.""" |
|
418 | 418 | assert state.profile_level == 0, b"Can't reset() while statprof is running" |
|
419 | 419 | CodeSite.cache.clear() |
|
420 | 420 | state.reset(frequency) |
|
421 | 421 | |
|
422 | 422 | |
|
423 | 423 | @contextmanager |
|
424 | 424 | def profile(): |
|
425 | 425 | start() |
|
426 | 426 | try: |
|
427 | 427 | yield |
|
428 | 428 | finally: |
|
429 | 429 | stop() |
|
430 | 430 | display() |
|
431 | 431 | |
|
432 | 432 | |
|
433 | 433 | ########################################################################### |
|
434 | 434 | ## Reporting API |
|
435 | 435 | |
|
436 | 436 | |
|
437 | 437 | class SiteStats: |
|
438 | 438 | def __init__(self, site): |
|
439 | 439 | self.site = site |
|
440 | 440 | self.selfcount = 0 |
|
441 | 441 | self.totalcount = 0 |
|
442 | 442 | |
|
443 | 443 | def addself(self): |
|
444 | 444 | self.selfcount += 1 |
|
445 | 445 | |
|
446 | 446 | def addtotal(self): |
|
447 | 447 | self.totalcount += 1 |
|
448 | 448 | |
|
449 | 449 | def selfpercent(self): |
|
450 | 450 | return self.selfcount / len(state.samples) * 100 |
|
451 | 451 | |
|
452 | 452 | def totalpercent(self): |
|
453 | 453 | return self.totalcount / len(state.samples) * 100 |
|
454 | 454 | |
|
455 | 455 | def selfseconds(self): |
|
456 | 456 | return self.selfcount * state.seconds_per_sample() |
|
457 | 457 | |
|
458 | 458 | def totalseconds(self): |
|
459 | 459 | return self.totalcount * state.seconds_per_sample() |
|
460 | 460 | |
|
461 | 461 | @classmethod |
|
462 | 462 | def buildstats(cls, samples): |
|
463 | 463 | stats = {} |
|
464 | 464 | |
|
465 | 465 | for sample in samples: |
|
466 | 466 | for i, site in enumerate(sample.stack): |
|
467 | 467 | sitestat = stats.get(site) |
|
468 | 468 | if not sitestat: |
|
469 | 469 | sitestat = SiteStats(site) |
|
470 | 470 | stats[site] = sitestat |
|
471 | 471 | |
|
472 | 472 | sitestat.addtotal() |
|
473 | 473 | |
|
474 | 474 | if i == 0: |
|
475 | 475 | sitestat.addself() |
|
476 | 476 | |
|
477 | 477 | return [s for s in stats.values()] |
|
478 | 478 | |
|
479 | 479 | |
|
480 | 480 | class DisplayFormats: |
|
481 | 481 | ByLine = 0 |
|
482 | 482 | ByMethod = 1 |
|
483 | 483 | AboutMethod = 2 |
|
484 | 484 | Hotpath = 3 |
|
485 | 485 | FlameGraph = 4 |
|
486 | 486 | Json = 5 |
|
487 | 487 | Chrome = 6 |
|
488 | 488 | |
|
489 | 489 | |
|
490 | 490 | def display(fp=None, format=3, data=None, **kwargs): |
|
491 | 491 | '''Print statistics, either to stdout or the given file object.''' |
|
492 | 492 | if data is None: |
|
493 | 493 | data = state |
|
494 | 494 | |
|
495 | 495 | if fp is None: |
|
496 | 496 | from .utils import procutil |
|
497 | 497 | |
|
498 | 498 | fp = procutil.stdout |
|
499 | 499 | if len(data.samples) == 0: |
|
500 | 500 | fp.write(b'No samples recorded.\n') |
|
501 | 501 | return |
|
502 | 502 | |
|
503 | 503 | if format == DisplayFormats.ByLine: |
|
504 | 504 | display_by_line(data, fp) |
|
505 | 505 | elif format == DisplayFormats.ByMethod: |
|
506 | 506 | display_by_method(data, fp) |
|
507 | 507 | elif format == DisplayFormats.AboutMethod: |
|
508 | 508 | display_about_method(data, fp, **kwargs) |
|
509 | 509 | elif format == DisplayFormats.Hotpath: |
|
510 | 510 | display_hotpath(data, fp, **kwargs) |
|
511 | 511 | elif format == DisplayFormats.FlameGraph: |
|
512 | 512 | write_to_flame(data, fp, **kwargs) |
|
513 | 513 | elif format == DisplayFormats.Json: |
|
514 | 514 | write_to_json(data, fp) |
|
515 | 515 | elif format == DisplayFormats.Chrome: |
|
516 | 516 | write_to_chrome(data, fp, **kwargs) |
|
517 | 517 | else: |
|
518 | 518 | raise Exception("Invalid display format") |
|
519 | 519 | |
|
520 | 520 | if format not in (DisplayFormats.Json, DisplayFormats.Chrome): |
|
521 | 521 | fp.write(b'---\n') |
|
522 | 522 | fp.write(b'Sample count: %d\n' % len(data.samples)) |
|
523 | 523 | fp.write(b'Total time: %f seconds (%f wall)\n' % data.accumulated_time) |
|
524 | 524 | |
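
As a quick illustration of the format switch above (assuming a profiling run has already populated the module-level state, and that the module is importable as mercurial.statprof):

from mercurial import statprof

# Hot-path tree on stdout, showing only nodes above 10% of total time.
statprof.display(format=statprof.DisplayFormats.Hotpath, limit=0.1)

# Raw samples as JSON, written to a file object opened in binary mode.
with open('samples.json', 'wb') as fp:
    statprof.display(fp=fp, format=statprof.DisplayFormats.Json)
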
|
525 | 525 | |
|
526 | 526 | def display_by_line(data, fp): |
|
527 | 527 | """Print the profiler data with each sample line represented |
|
528 | 528 | as one row in a table. Sorted by self-time per line.""" |
|
529 | 529 | stats = SiteStats.buildstats(data.samples) |
|
530 | 530 | stats.sort(reverse=True, key=lambda x: x.selfseconds()) |
|
531 | 531 | |
|
532 | 532 | fp.write( |
|
533 | 533 | b'%5.5s %10.10s %7.7s %-8.8s\n' |
|
534 | 534 | % (b'% ', b'cumulative', b'self', b'') |
|
535 | 535 | ) |
|
536 | 536 | fp.write( |
|
537 | 537 | b'%5.5s %9.9s %8.8s %-8.8s\n' |
|
538 | 538 | % (b"time", b"seconds", b"seconds", b"name") |
|
539 | 539 | ) |
|
540 | 540 | |
|
541 | 541 | for stat in stats: |
|
542 | 542 | site = stat.site |
|
543 | 543 | sitelabel = b'%s:%d:%s' % ( |
|
544 | 544 | site.filename(), |
|
545 | 545 | site.lineno or -1, |
|
546 | 546 | site.function, |
|
547 | 547 | ) |
|
548 | 548 | fp.write( |
|
549 | 549 | b'%6.2f %9.2f %9.2f %s\n' |
|
550 | 550 | % ( |
|
551 | 551 | stat.selfpercent(), |
|
552 | 552 | stat.totalseconds(), |
|
553 | 553 | stat.selfseconds(), |
|
554 | 554 | sitelabel, |
|
555 | 555 | ) |
|
556 | 556 | ) |
|
557 | 557 | |
|
558 | 558 | |
|
559 | 559 | def display_by_method(data, fp): |
|
560 | 560 | """Print the profiler data with each sample function represented |
|
561 | 561 | as one row in a table. Important lines within that function are |
|
562 | 562 | output as nested rows. Sorted by self-time per line.""" |
|
563 | 563 | fp.write( |
|
564 | 564 | b'%5.5s %10.10s %7.7s %-8.8s\n' |
|
565 | 565 | % (b'% ', b'cumulative', b'self', b'') |
|
566 | 566 | ) |
|
567 | 567 | fp.write( |
|
568 | 568 | b'%5.5s %9.9s %8.8s %-8.8s\n' |
|
569 | 569 | % (b"time", b"seconds", b"seconds", b"name") |
|
570 | 570 | ) |
|
571 | 571 | |
|
572 | 572 | stats = SiteStats.buildstats(data.samples) |
|
573 | 573 | |
|
574 | 574 | grouped = defaultdict(list) |
|
575 | 575 | for stat in stats: |
|
576 | 576 | grouped[stat.site.filename() + b":" + stat.site.function].append(stat) |
|
577 | 577 | |
|
578 | 578 | # compute sums for each function |
|
579 | 579 | functiondata = [] |
|
580 | 580 | for fname, sitestats in grouped.items(): |
|
581 | 581 | total_cum_sec = 0 |
|
582 | 582 | total_self_sec = 0 |
|
583 | 583 | total_percent = 0 |
|
584 | 584 | for stat in sitestats: |
|
585 | 585 | total_cum_sec += stat.totalseconds() |
|
586 | 586 | total_self_sec += stat.selfseconds() |
|
587 | 587 | total_percent += stat.selfpercent() |
|
588 | 588 | |
|
589 | 589 | functiondata.append( |
|
590 | 590 | (fname, total_cum_sec, total_self_sec, total_percent, sitestats) |
|
591 | 591 | ) |
|
592 | 592 | |
|
593 | 593 | # sort by total self sec |
|
594 | 594 | functiondata.sort(reverse=True, key=lambda x: x[2]) |
|
595 | 595 | |
|
596 | 596 | for function in functiondata: |
|
597 | 597 | if function[3] < 0.05: |
|
598 | 598 | continue |
|
599 | 599 | fp.write( |
|
600 | 600 | b'%6.2f %9.2f %9.2f %s\n' |
|
601 | 601 | % ( |
|
602 | 602 | function[3], # total percent |
|
603 | 603 | function[1], # total cum sec |
|
604 | 604 | function[2], # total self sec |
|
605 | 605 | function[0], |
|
606 | 606 | ) |
|
607 | 607 | ) # file:function |
|
608 | 608 | |
|
609 | 609 | function[4].sort(reverse=True, key=lambda i: i.selfseconds()) |
|
610 | 610 | for stat in function[4]: |
|
611 | 611 | # only show line numbers for significant locations (>1% time spent) |
|
612 | 612 | if stat.selfpercent() > 1: |
|
613 | 613 | source = stat.site.getsource(25) |
|
614 | 614 | if not isinstance(source, bytes): |
|
615 | 615 | source = pycompat.bytestr(source) |
|
616 | 616 | |
|
617 | 617 | stattuple = ( |
|
618 | 618 | stat.selfpercent(), |
|
619 | 619 | stat.selfseconds(), |
|
620 | 620 | stat.site.lineno or -1, |
|
621 | 621 | source, |
|
622 | 622 | ) |
|
623 | 623 | |
|
624 | 624 | fp.write(b'%33.0f%% %6.2f line %d: %s\n' % stattuple) |
|
625 | 625 | |
|
626 | 626 | |
|
627 | 627 | def display_about_method(data, fp, function=None, **kwargs): |
|
628 | 628 | if function is None: |
|
629 | 629 | raise Exception("Invalid function") |
|
630 | 630 | |
|
631 | 631 | filename = None |
|
632 | 632 | if b':' in function: |
|
633 | 633 | filename, function = function.split(b':') |
|
634 | 634 | |
|
635 | 635 | relevant_samples = 0 |
|
636 | 636 | parents = {} |
|
637 | 637 | children = {} |
|
638 | 638 | |
|
639 | 639 | for sample in data.samples: |
|
640 | 640 | for i, site in enumerate(sample.stack): |
|
641 | 641 | if site.function == function and ( |
|
642 | 642 | not filename or site.filename() == filename |
|
643 | 643 | ): |
|
644 | 644 | relevant_samples += 1 |
|
645 | 645 | if i != len(sample.stack) - 1: |
|
646 | 646 | parent = sample.stack[i + 1] |
|
647 | 647 | if parent in parents: |
|
648 | 648 | parents[parent] = parents[parent] + 1 |
|
649 | 649 | else: |
|
650 | 650 | parents[parent] = 1 |
|
651 | 651 | |
|
652 | 652 | if site in children: |
|
653 | 653 | children[site] = children[site] + 1 |
|
654 | 654 | else: |
|
655 | 655 | children[site] = 1 |
|
656 | 656 | |
|
657 | 657 | parents = [(parent, count) for parent, count in parents.items()] |
|
658 | 658 | parents.sort(reverse=True, key=lambda x: x[1]) |
|
659 | 659 | for parent, count in parents: |
|
660 | 660 | fp.write( |
|
661 | 661 | b'%6.2f%% %s:%s line %s: %s\n' |
|
662 | 662 | % ( |
|
663 | 663 | count / relevant_samples * 100, |
|
664 | 664 | pycompat.fsencode(parent.filename()), |
|
665 | 665 | pycompat.sysbytes(parent.function), |
|
666 | 666 | parent.lineno or -1, |
|
667 | 667 | pycompat.sysbytes(parent.getsource(50)), |
|
668 | 668 | ) |
|
669 | 669 | ) |
|
670 | 670 | |
|
671 | 671 | stats = SiteStats.buildstats(data.samples) |
|
672 | 672 | stats = [ |
|
673 | 673 | s |
|
674 | 674 | for s in stats |
|
675 | 675 | if s.site.function == function |
|
676 | 676 | and (not filename or s.site.filename() == filename) |
|
677 | 677 | ] |
|
678 | 678 | |
|
679 | 679 | total_cum_sec = 0 |
|
680 | 680 | total_self_sec = 0 |
|
681 | 681 | total_self_percent = 0 |
|
682 | 682 | total_cum_percent = 0 |
|
683 | 683 | for stat in stats: |
|
684 | 684 | total_cum_sec += stat.totalseconds() |
|
685 | 685 | total_self_sec += stat.selfseconds() |
|
686 | 686 | total_self_percent += stat.selfpercent() |
|
687 | 687 | total_cum_percent += stat.totalpercent() |
|
688 | 688 | |
|
689 | 689 | fp.write( |
|
690 | 690 | b'\n %s:%s Total: %0.2fs (%0.2f%%) Self: %0.2fs (%0.2f%%)\n\n' |
|
691 | 691 | % ( |
|
692 | 692 | pycompat.sysbytes(filename or b'___'), |
|
693 | 693 | pycompat.sysbytes(function), |
|
694 | 694 | total_cum_sec, |
|
695 | 695 | total_cum_percent, |
|
696 | 696 | total_self_sec, |
|
697 | 697 | total_self_percent, |
|
698 | 698 | ) |
|
699 | 699 | ) |
|
700 | 700 | |
|
701 | 701 | children = [(child, count) for child, count in children.items()] |
|
702 | 702 | children.sort(reverse=True, key=lambda x: x[1]) |
|
703 | 703 | for child, count in children: |
|
704 | 704 | fp.write( |
|
705 | 705 | b' %6.2f%% line %s: %s\n' |
|
706 | 706 | % ( |
|
707 | 707 | count / relevant_samples * 100, |
|
708 | 708 | child.lineno or -1, |
|
709 | 709 | pycompat.sysbytes(child.getsource(50)), |
|
710 | 710 | ) |
|
711 | 711 | ) |
|
712 | 712 | |
|
713 | 713 | |
|
714 | 714 | def display_hotpath(data, fp, limit=0.05, **kwargs): |
|
715 | 715 | class HotNode: |
|
716 | 716 | def __init__(self, site): |
|
717 | 717 | self.site = site |
|
718 | 718 | self.count = 0 |
|
719 | 719 | self.children = {} |
|
720 | 720 | |
|
721 | 721 | def add(self, stack, time): |
|
722 | 722 | self.count += time |
|
723 | 723 | site = stack[0] |
|
724 | 724 | child = self.children.get(site) |
|
725 | 725 | if not child: |
|
726 | 726 | child = HotNode(site) |
|
727 | 727 | self.children[site] = child |
|
728 | 728 | |
|
729 | 729 | if len(stack) > 1: |
|
730 | 730 | i = 1 |
|
731 | 731 | # Skip boiler plate parts of the stack |
|
732 | 732 | while i < len(stack) and stack[i].skipname() in skips: |
|
733 | 733 | i += 1 |
|
734 | 734 | if i < len(stack): |
|
735 | 735 | child.add(stack[i:], time) |
|
736 | 736 | else: |
|
737 | 737 | # Normally this is done by the .add() calls |
|
738 | 738 | child.count += time |
|
739 | 739 | |
|
740 | 740 | root = HotNode(None) |
|
741 | 741 | lasttime = data.samples[0].time |
|
742 | 742 | for sample in data.samples: |
|
743 | 743 | root.add(sample.stack[::-1], sample.time - lasttime) |
|
744 | 744 | lasttime = sample.time |
|
745 | 745 | showtime = kwargs.get('showtime', True) |
|
746 | 746 | |
|
747 | 747 | def _write(node, depth, multiple_siblings): |
|
748 | 748 | site = node.site |
|
749 | 749 | visiblechildren = [ |
|
750 | 750 | c for c in node.children.values() if c.count >= (limit * root.count) |
|
751 | 751 | ] |
|
752 | 752 | if site: |
|
753 | 753 | indent = depth * 2 - 1 |
|
754 | 754 | filename = (site.filename() + b':').ljust(15) |
|
755 | 755 | function = site.function |
|
756 | 756 | |
|
757 | 757 | # lots of string formatting |
|
758 | 758 | listpattern = ( |
|
759 | 759 | b''.ljust(indent) |
|
760 | 760 | + (b'\\' if multiple_siblings else b'|') |
|
761 | 761 | + b' %4.1f%%' |
|
762 | 762 | + (b' %5.2fs' % node.count if showtime else b'') |
|
763 | 763 | + b' %s %s' |
|
764 | 764 | ) |
|
765 | 765 | liststring = listpattern % ( |
|
766 | 766 | node.count / root.count * 100, |
|
767 | 767 | filename, |
|
768 | 768 | function, |
|
769 | 769 | ) |
|
770 | 770 | # 4 to account for the word 'line' |
|
771 | 771 | spacing_len = max(4, 55 - len(liststring)) |
|
772 | 772 | prefix = b'' |
|
773 | 773 | if spacing_len == 4: |
|
774 | 774 | prefix = b', ' |
|
775 | 775 | |
|
776 | 776 | codepattern = b'%s%s %d: %s%s' |
|
777 | 777 | codestring = codepattern % ( |
|
778 | 778 | prefix, |
|
779 | 779 | b'line'.rjust(spacing_len), |
|
780 | 780 | site.lineno if site.lineno is not None else -1, |
|
781 | 781 | b''.ljust(max(0, 4 - len(str(site.lineno)))), |
|
782 | 782 | site.getsource(30), |
|
783 | 783 | ) |
|
784 | 784 | |
|
785 | 785 | finalstring = liststring + codestring |
|
786 | 786 | childrensamples = sum([c.count for c in node.children.values()]) |
|
787 | 787 | # Make frames that performed more than 10% of the operation red |
|
788 | 788 | if node.count - childrensamples > (0.1 * root.count): |
|
789 | 789 | finalstring = b'\033[91m' + finalstring + b'\033[0m' |
|
790 | 790 | # Make frames that didn't actually perform work dark grey |
|
791 | 791 | elif node.count - childrensamples == 0: |
|
792 | 792 | finalstring = b'\033[90m' + finalstring + b'\033[0m' |
|
793 | 793 | fp.write(finalstring + b'\n') |
|
794 | 794 | |
|
795 | 795 | newdepth = depth |
|
796 | 796 | if len(visiblechildren) > 1 or multiple_siblings: |
|
797 | 797 | newdepth += 1 |
|
798 | 798 | |
|
799 | 799 | visiblechildren.sort(reverse=True, key=lambda x: x.count) |
|
800 | 800 | for child in visiblechildren: |
|
801 | 801 | _write(child, newdepth, len(visiblechildren) > 1) |
|
802 | 802 | |
|
803 | 803 | if root.count > 0: |
|
804 | 804 | _write(root, 0, False) |
|
805 | 805 | |
|
806 | 806 | |
|
807 | 807 | def write_to_flame(data, fp, scriptpath=None, outputfile=None, **kwargs): |
|
808 | 808 | if scriptpath is None: |
|
809 | 809 | scriptpath = encoding.environ[b'HOME'] + b'/flamegraph.pl' |
|
810 | 810 | if not os.path.exists(scriptpath): |
|
811 | 811 | fp.write(b'error: missing %s\n' % scriptpath) |
|
812 | 812 | fp.write(b'get it here: https://github.com/brendangregg/FlameGraph\n') |
|
813 | 813 | return |
|
814 | 814 | |
|
815 | 815 | lines = {} |
|
816 | 816 | for sample in data.samples: |
|
817 | 817 | sites = [s.function for s in sample.stack] |
|
818 | 818 | sites.reverse() |
|
819 | 819 | line = b';'.join(sites) |
|
820 | 820 | if line in lines: |
|
821 | 821 | lines[line] = lines[line] + 1 |
|
822 | 822 | else: |
|
823 | 823 | lines[line] = 1 |
|
824 | 824 | |
|
825 | 825 | fd, path = pycompat.mkstemp() |
|
826 | 826 | |
|
827 | 827 | with open(path, b"w+") as file: |
|
828 | 828 | for line, count in lines.items(): |
|
829 | 829 | file.write(b"%s %d\n" % (line, count)) |
|
830 | 830 | |
|
831 | 831 | if outputfile is None: |
|
832 | 832 | outputfile = b'~/flamegraph.svg' |
|
833 | 833 | |
|
834 | 834 | os.system(b"perl %s %s > %s" % (scriptpath, path, outputfile))
|
835 | 835 | fp.write(b'Written to %s\n' % outputfile) |
|
836 | 836 | |
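
A note on the intermediate file handed to flamegraph.pl above: it is the usual "folded stacks" input, one semicolon-joined call chain per line followed by its sample count, along the lines of (function names purely illustrative):

dispatch;runcommand;log 12
dispatch;runcommand;status 3
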
|
837 | 837 | |
|
838 | 838 | _pathcache = {} |
|
839 | 839 | |
|
840 | 840 | |
|
841 | 841 | def simplifypath(path): |
|
842 | 842 | """Attempt to make the path to a Python module easier to read by |
|
843 | 843 | removing whatever part of the Python search path it was found |
|
844 | 844 | on.""" |
|
845 | 845 | |
|
846 | 846 | if path in _pathcache: |
|
847 | 847 | return _pathcache[path] |
|
848 | 848 | hgpath = encoding.__file__.rsplit(os.sep, 2)[0] |
|
849 | 849 | for p in [hgpath] + sys.path: |
|
850 | 850 | prefix = p + os.sep |
|
851 | 851 | if path.startswith(prefix): |
|
852 | 852 | path = path[len(prefix) :] |
|
853 | 853 | break |
|
854 | 854 | _pathcache[path] = path |
|
855 | 855 | return path |
|
856 | 856 | |
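
In other words, given a hypothetical module at /usr/lib/python3/site-packages/mercurial/encoding.py and that site-packages directory on sys.path, simplifypath() would return mercurial/encoding.py; paths it cannot shorten are returned (and cached) unchanged.
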
|
857 | 857 | |
|
858 | 858 | def write_to_json(data, fp): |
|
859 | 859 | samples = [] |
|
860 | 860 | |
|
861 | 861 | for sample in data.samples: |
|
862 | 862 | stack = [] |
|
863 | 863 | |
|
864 | 864 | for frame in sample.stack: |
|
865 | 865 | stack.append( |
|
866 | 866 | ( |
|
867 | 867 | pycompat.sysstr(frame.path), |
|
868 | 868 | frame.lineno or -1, |
|
869 | 869 | pycompat.sysstr(frame.function), |
|
870 | 870 | ) |
|
871 | 871 | ) |
|
872 | 872 | |
|
873 | 873 | samples.append((sample.time, stack)) |
|
874 | 874 | |
|
875 | 875 | data = json.dumps(samples) |
|
876 | 876 | if not isinstance(data, bytes): |
|
877 | 877 | data = data.encode('utf-8') |
|
878 | 878 | |
|
879 | 879 | fp.write(data) |
|
880 | 880 | |
|
881 | 881 | |
|
882 | 882 | def write_to_chrome(data, fp, minthreshold=0.005, maxthreshold=0.999): |
|
883 | 883 | samples = [] |
|
884 | 884 | laststack = collections.deque() |
|
885 | 885 | lastseen = collections.deque() |
|
886 | 886 | |
|
887 | 887 | # The Chrome tracing format allows us to use a compact stack |
|
888 | 888 | # representation to save space. It's fiddly but worth it. |
|
889 | 889 | # We maintain a bijection between stack and ID. |
|
890 | 890 | stack2id = {} |
|
891 | 891 | id2stack = [] # will eventually be rendered |
|
892 | 892 | |
|
893 | 893 | def stackid(stack): |
|
894 | 894 | if not stack: |
|
895 | 895 | return |
|
896 | 896 | if stack in stack2id: |
|
897 | 897 | return stack2id[stack] |
|
898 | 898 | parent = stackid(stack[1:]) |
|
899 | 899 | myid = len(stack2id) |
|
900 | 900 | stack2id[stack] = myid |
|
901 | 901 | id2stack.append(dict(category=stack[0][0], name='%s %s' % stack[0])) |
|
902 | 902 | if parent is not None: |
|
903 | 903 | id2stack[-1].update(parent=parent) |
|
904 | 904 | return myid |
|
905 | 905 | |
|
906 | 906 | # The sampling profiler can sample multiple times without |
|
907 | 907 | # advancing the clock, potentially causing the Chrome trace viewer |
|
908 | 908 | # to render single-pixel columns that we cannot zoom in on. We |
|
909 | 909 | # work around this by pretending that zero-duration samples are a |
|
910 | 910 | # millisecond in length. |
|
911 | 911 | |
|
912 | 912 | clamp = 0.001 |
|
913 | 913 | |
|
914 | 914 | # We provide knobs that by default attempt to filter out stack |
|
915 | 915 | # frames that are too noisy: |
|
916 | 916 | # |
|
917 | 917 | # * A few take almost all execution time. These are usually boring |
|
918 | 918 | # setup functions, giving a stack that is deep but uninformative. |
|
919 | 919 | # |
|
920 | 920 | # * Numerous samples take almost no time, but introduce lots of |
|
921 | 921 | # noisy, oft-deep "spines" into a rendered profile. |
|
922 | 922 | |
|
923 | 923 | blacklist = set() |
|
924 | 924 | totaltime = data.samples[-1].time - data.samples[0].time |
|
925 | 925 | minthreshold = totaltime * minthreshold |
|
926 | 926 | maxthreshold = max(totaltime * maxthreshold, clamp) |
|
927 | 927 | |
|
928 | 928 | def poplast(): |
|
929 | 929 | oldsid = stackid(tuple(laststack)) |
|
930 | 930 | oldcat, oldfunc = laststack.popleft() |
|
931 | 931 | oldtime, oldidx = lastseen.popleft() |
|
932 | 932 | duration = sample.time - oldtime |
|
933 | 933 | if minthreshold <= duration <= maxthreshold: |
|
934 | 934 | # ensure no zero-duration events |
|
935 | 935 | sampletime = max(oldtime + clamp, sample.time) |
|
936 | 936 | samples.append( |
|
937 | 937 | dict( |
|
938 | 938 | ph='E', |
|
939 | 939 | name=oldfunc, |
|
940 | 940 | cat=oldcat, |
|
941 | 941 | sf=oldsid, |
|
942 | 942 | ts=sampletime * 1e6, |
|
943 | 943 | pid=0, |
|
944 | 944 | ) |
|
945 | 945 | ) |
|
946 | 946 | else: |
|
947 | 947 | blacklist.add(oldidx) |
|
948 | 948 | |
|
949 | 949 | # Much fiddling to synthesize correctly(ish) nested begin/end |
|
950 | 950 | # events given only stack snapshots. |
|
951 | 951 | |
|
952 | 952 | for sample in data.samples: |
|
953 | 953 | stack = tuple( |
|
954 | 954 | ( |
|
955 | 955 | ( |
|
956 | 956 | '%s:%d' |
|
957 | 957 | % ( |
|
958 | 958 | simplifypath(pycompat.sysstr(frame.path)), |
|
959 | 959 | frame.lineno or -1, |
|
960 | 960 | ), |
|
961 | 961 | pycompat.sysstr(frame.function), |
|
962 | 962 | ) |
|
963 | 963 | for frame in sample.stack |
|
964 | 964 | ) |
|
965 | 965 | ) |
|
966 | 966 | qstack = collections.deque(stack) |
|
967 | 967 | if laststack == qstack: |
|
968 | 968 | continue |
|
969 | 969 | while laststack and qstack and laststack[-1] == qstack[-1]: |
|
970 | 970 | laststack.pop() |
|
971 | 971 | qstack.pop() |
|
972 | 972 | while laststack: |
|
973 | 973 | poplast() |
|
974 | 974 | for f in reversed(qstack): |
|
975 | 975 | lastseen.appendleft((sample.time, len(samples))) |
|
976 | 976 | laststack.appendleft(f) |
|
977 | 977 | path, name = f |
|
978 | 978 | sid = stackid(tuple(laststack)) |
|
979 | 979 | samples.append( |
|
980 | 980 | dict( |
|
981 | 981 | ph='B', |
|
982 | 982 | name=name, |
|
983 | 983 | cat=path, |
|
984 | 984 | ts=sample.time * 1e6, |
|
985 | 985 | sf=sid, |
|
986 | 986 | pid=0, |
|
987 | 987 | ) |
|
988 | 988 | ) |
|
989 | 989 | laststack = collections.deque(stack) |
|
990 | 990 | while laststack: |
|
991 | 991 | poplast() |
|
992 | 992 | events = [ |
|
993 | 993 | sample for idx, sample in enumerate(samples) if idx not in blacklist |
|
994 | 994 | ] |
|
995 | 995 | frames = collections.OrderedDict( |
|
996 | 996 | (str(k), v) for (k, v) in enumerate(id2stack) |
|
997 | 997 | ) |
|
998 | 998 | data = json.dumps(dict(traceEvents=events, stackFrames=frames), indent=1) |
|
999 | 999 | if not isinstance(data, bytes): |
|
1000 | 1000 | data = data.encode('utf-8') |
|
1001 | 1001 | fp.write(data) |
|
1002 | 1002 | fp.write(b'\n') |
|
1003 | 1003 | |
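
For reference, the JSON this writes has roughly the following shape (a hand-written sketch with illustrative values, not real output): paired 'B'/'E' events that point into the shared stackFrames table via their 'sf' id, with timestamps in microseconds, intended for the Chrome trace viewer.

import json

trace = {
    'traceEvents': [
        {'ph': 'B', 'name': 'runcommand', 'cat': 'dispatch.py:1', 'ts': 0.0, 'sf': 0, 'pid': 0},
        {'ph': 'E', 'name': 'runcommand', 'cat': 'dispatch.py:1', 'ts': 1500.0, 'sf': 0, 'pid': 0},
    ],
    'stackFrames': {
        '0': {'category': 'dispatch.py:1', 'name': 'dispatch.py:1 runcommand'},
    },
}
print(json.dumps(trace, indent=1))
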
|
1004 | 1004 | |
|
1005 | 1005 | def printusage(): |
|
1006 | 1006 | print( |
|
1007 | 1007 | r""" |
|
1008 | 1008 | The statprof command line allows you to inspect the last profile's results in |
|
1009 | 1009 | the following forms: |
|
1010 | 1010 | |
|
1011 | 1011 | usage: |
|
1012 | 1012 | hotpath [-l --limit percent] |
|
1013 | 1013 | Shows a graph of calls with the percent of time each takes. |
|
1014 | 1014 | Red calls take over 10% of the total time themselves.
|
1015 | 1015 | lines |
|
1016 | 1016 | Shows the actual sampled lines. |
|
1017 | 1017 | functions |
|
1018 | 1018 | Shows the samples grouped by function. |
|
1019 | 1019 | function [filename:]functionname |
|
1020 | 1020 | Shows the callers and callees of a particular function. |
|
1021 | 1021 | flame [-s --script-path] [-o --output-file path] |
|
1022 | 1022 | Writes out a flamegraph to output-file (defaults to ~/flamegraph.svg) |
|
1023 | 1023 | Requires that ~/flamegraph.pl exist. |
|
1024 | 1024 | (Specify alternate script path with --script-path.)""" |
|
1025 | 1025 | ) |
|
1026 | 1026 | |
|
1027 | 1027 | |
|
1028 | 1028 | def main(argv=None): |
|
1029 | 1029 | if argv is None: |
|
1030 | 1030 | argv = sys.argv |
|
1031 | 1031 | |
|
1032 | 1032 | if len(argv) == 1: |
|
1033 | 1033 | printusage() |
|
1034 | 1034 | return 0 |
|
1035 | 1035 | |
|
1036 | 1036 | displayargs = {} |
|
1037 | 1037 | |
|
1038 | 1038 | optstart = 2 |
|
1039 | 1039 | displayargs[b'function'] = None |
|
1040 | 1040 | if argv[1] == 'hotpath': |
|
1041 | 1041 | displayargs[b'format'] = DisplayFormats.Hotpath |
|
1042 | 1042 | elif argv[1] == 'lines': |
|
1043 | 1043 | displayargs[b'format'] = DisplayFormats.ByLine |
|
1044 | 1044 | elif argv[1] == 'functions': |
|
1045 | 1045 | displayargs[b'format'] = DisplayFormats.ByMethod |
|
1046 | 1046 | elif argv[1] == 'function': |
|
1047 | 1047 | displayargs[b'format'] = DisplayFormats.AboutMethod |
|
1048 | 1048 | displayargs[b'function'] = argv[2] |
|
1049 | 1049 | optstart = 3 |
|
1050 | 1050 | elif argv[1] == 'flame': |
|
1051 | 1051 | displayargs[b'format'] = DisplayFormats.FlameGraph |
|
1052 | 1052 | else: |
|
1053 | 1053 | printusage() |
|
1054 | 1054 | return 0 |
|
1055 | 1055 | |
|
1056 | 1056 | # process options |
|
1057 | 1057 | try: |
|
1058 | 1058 | opts, args = pycompat.getoptb( |
|
1059 | 1059 | pycompat.sysargv[optstart:], |
|
1060 | 1060 | b"hl:f:o:p:", |
|
1061 | 1061 | [b"help", b"limit=", b"file=", b"output-file=", b"script-path="], |
|
1062 | 1062 | ) |
|
1063 | 1063 | except getopt.error as msg: |
|
1064 | 1064 | print(msg) |
|
1065 | 1065 | printusage() |
|
1066 | 1066 | return 2 |
|
1067 | 1067 | |
|
1068 | 1068 | displayargs[b'limit'] = 0.05 |
|
1069 | 1069 | path = None |
|
1070 | 1070 | for o, value in opts: |
|
1071 | 1071 | if o in ("-l", "--limit"): |
|
1072 | 1072 | displayargs[b'limit'] = float(value) |
|
1073 | 1073 | elif o in ("-f", "--file"): |
|
1074 | 1074 | path = value |
|
1075 | 1075 | elif o in ("-o", "--output-file"): |
|
1076 | 1076 | displayargs[b'outputfile'] = value |
|
1077 | 1077 | elif o in ("-p", "--script-path"): |
|
1078 | 1078 | displayargs[b'scriptpath'] = value |
|
1079 | 1079 | elif o in ("-h", "--help"):
|
1080 | 1080 | printusage() |
|
1081 | 1081 | return 0 |
|
1082 | 1082 | else: |
|
1083 | 1083 | assert False, "unhandled option %s" % o |
|
1084 | 1084 | |
|
1085 | 1085 | if not path: |
|
1086 | 1086 | print('must specify --file to load') |
|
1087 | 1087 | return 1 |
|
1088 | 1088 | |
|
1089 | 1089 | load_data(path=path) |
|
1090 | 1090 | |
|
1091 | 1091 | display(**pycompat.strkwargs(displayargs)) |
|
1092 | 1092 | |
|
1093 | 1093 | return 0 |
|
1094 | 1094 | |
|
1095 | 1095 | |
|
1096 | 1096 | if __name__ == "__main__": |
|
1097 | 1097 | sys.exit(main()) |
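
Tying the CLI above to the STATPROF_DEST hook in stop(), a hypothetical session (the module path and dump location are assumptions) could be:

python -m mercurial.statprof hotpath --file /tmp/statprof.dump --limit 0.1

which loads the saved samples and prints the same hot-path tree as display(format=DisplayFormats.Hotpath).
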
@@ -1,23 +1,23 b'' | |||
|
1 | 1 | # A dummy extension that installs an hgweb command that throws an Exception. |
|
2 | 2 | |
|
3 | 3 | |
|
4 | 4 | from mercurial.hgweb import webcommands |
|
5 | 5 | |
|
6 | 6 | |
|
7 | 7 | def raiseerror(web): |
|
8 | 8 | '''Dummy web command that raises an uncaught Exception.''' |
|
9 | 9 | |
|
10 | 10 | # Simulate an error after partial response. |
|
11 | 11 | if b'partialresponse' in web.req.qsparams: |
|
12 | 12 | web.res.status = b'200 Script output follows' |
|
13 | 13 | web.res.headers[b'Content-Type'] = b'text/plain' |
|
14 | 14 | web.res.setbodywillwrite() |
|
15 | 15 | list(web.res.sendresponse()) |
|
16 | 16 | web.res.getbodyfile().write(b'partial content\n') |
|
17 | 17 | |
|
18 | 18 | raise AttributeError('I am an uncaught error!') |
|
19 | 19 | |
|
20 | 20 | |
|
21 | 21 | def extsetup(ui): |
|
22 | 22 | setattr(webcommands, 'raiseerror', raiseerror) |
|
23 |     | webcommands.__all__.append(

    | 23 | webcommands.__all__.append('raiseerror')
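
For completeness, a hypothetical hgrc stanza that would load this dummy extension into an hgweb server (the file path is an assumption); once loaded, requesting the new 'raiseerror' web command, optionally with ?partialresponse=1, exercises the error paths on purpose:

[extensions]
raiseerror = /path/to/dummy_raiseerror.py
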