##// END OF EJS Templates
re2: adjust local variable assignment scope...
Matt Harbison -
r47688:9c3e8456 default
parent child Browse files
Show More
@@ -1,3377 +1,3378 b''
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import collections
19 import collections
20 import contextlib
20 import contextlib
21 import errno
21 import errno
22 import gc
22 import gc
23 import hashlib
23 import hashlib
24 import itertools
24 import itertools
25 import locale
25 import locale
26 import mmap
26 import mmap
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import stat
31 import stat
32 import sys
32 import sys
33 import time
33 import time
34 import traceback
34 import traceback
35 import warnings
35 import warnings
36
36
37 from .thirdparty import attr
37 from .thirdparty import attr
38 from .pycompat import (
38 from .pycompat import (
39 delattr,
39 delattr,
40 getattr,
40 getattr,
41 open,
41 open,
42 setattr,
42 setattr,
43 )
43 )
44 from .node import hex
44 from .node import hex
45 from hgdemandimport import tracing
45 from hgdemandimport import tracing
46 from . import (
46 from . import (
47 encoding,
47 encoding,
48 error,
48 error,
49 i18n,
49 i18n,
50 policy,
50 policy,
51 pycompat,
51 pycompat,
52 urllibcompat,
52 urllibcompat,
53 )
53 )
54 from .utils import (
54 from .utils import (
55 compression,
55 compression,
56 hashutil,
56 hashutil,
57 procutil,
57 procutil,
58 stringutil,
58 stringutil,
59 urlutil,
59 urlutil,
60 )
60 )
61
61
62 if pycompat.TYPE_CHECKING:
62 if pycompat.TYPE_CHECKING:
63 from typing import (
63 from typing import (
64 Iterator,
64 Iterator,
65 List,
65 List,
66 Optional,
66 Optional,
67 Tuple,
67 Tuple,
68 )
68 )
69
69
70
70
# prefer the C implementations of these modules when available
# (selection is controlled by the module policy)
base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')

b85decode = base85.b85decode
b85encode = base85.b85encode

# re-export py2/py3 compatibility aliases at module level so the rest of
# the codebase can import them from util
cookielib = pycompat.cookielib
httplib = pycompat.httplib
pickle = pycompat.pickle
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = pycompat.bytesio
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr
93
93
# select the platform-specific implementation layer; both modules expose
# the same set of names, aliased below
if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._
100
100
# re-export the platform layer's functions at module level
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
readlink = platform.readlink
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
# prefer the C implementation of statfiles when the osutil module has one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username
143
143
def setumask(val):
    # type: (int) -> None
    '''updates the umask. used by chg server '''
    if pycompat.iswindows:
        # umask is not a meaningful concept on Windows; nothing to do
        return
    os.umask(val)
    global umask
    # keep the module-level alias and the platform layer's copy in sync
    umask = val & 0o777
    platform.umask = umask
152
152
153
153
# small compat layer
compengines = compression.compengines
SERVERROLE = compression.SERVERROLE
CLIENTROLE = compression.CLIENTROLE

try:
    # only provided by the C osutil module; absent in the pure version
    recvfds = osutil.recvfds
except AttributeError:
    pass

# Python compatibility

# unique sentinel distinguishing "argument not passed" from None
_notset = object()
167
167
168
168
def bitsfrom(container):
    """Return the bitwise OR of every value in ``container``."""
    result = 0
    for flag in container:
        result = result | flag
    return result
174
174
175
175
176 # python 2.6 still have deprecation warning enabled by default. We do not want
176 # python 2.6 still have deprecation warning enabled by default. We do not want
177 # to display anything to standard user so detect if we are running test and
177 # to display anything to standard user so detect if we are running test and
178 # only use python deprecation warning in this case.
178 # only use python deprecation warning in this case.
179 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
179 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
180 if _dowarn:
180 if _dowarn:
181 # explicitly unfilter our warning for python 2.7
181 # explicitly unfilter our warning for python 2.7
182 #
182 #
183 # The option of setting PYTHONWARNINGS in the test runner was investigated.
183 # The option of setting PYTHONWARNINGS in the test runner was investigated.
184 # However, module name set through PYTHONWARNINGS was exactly matched, so
184 # However, module name set through PYTHONWARNINGS was exactly matched, so
185 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
185 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
186 # makes the whole PYTHONWARNINGS thing useless for our usecase.
186 # makes the whole PYTHONWARNINGS thing useless for our usecase.
187 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
187 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
188 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
188 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
190 if _dowarn and pycompat.ispy3:
190 if _dowarn and pycompat.ispy3:
191 # silence warning emitted by passing user string to re.sub()
191 # silence warning emitted by passing user string to re.sub()
192 warnings.filterwarnings(
192 warnings.filterwarnings(
193 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
193 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
194 )
194 )
195 warnings.filterwarnings(
195 warnings.filterwarnings(
196 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
196 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
197 )
197 )
198 # TODO: reinvent imp.is_frozen()
198 # TODO: reinvent imp.is_frozen()
199 warnings.filterwarnings(
199 warnings.filterwarnings(
200 'ignore',
200 'ignore',
201 'the imp module is deprecated',
201 'the imp module is deprecated',
202 DeprecationWarning,
202 DeprecationWarning,
203 'mercurial',
203 'mercurial',
204 )
204 )
205
205
206
206
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(
        pycompat.sysstr(msg + suffix), DeprecationWarning, stacklevel + 1
    )
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
220
220
221
221
# digest algorithms accepted by the digester helper below, keyed by name
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# sanity check: every ranked digest must have an implementation
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
232
232
233
233
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        """Create one hash object per name in ``digests``; optionally seed
        all of them with the bytes ``s``.

        Raises error.Abort for names not listed in DIGESTS.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` (bytes) into every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the current hex digest for the algorithm named ``key``."""
        if key not in DIGESTS:
            # fix: the error message previously interpolated the stale
            # module-level loop variable 'k' instead of 'key', reporting
            # the wrong digest name
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
280
280
281
281
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # read from the wrapped handle, folding the data into every digest
        # and tracking how many bytes have been seen so far
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check the byte count first, then each expected digest value
        if self._got != self._size:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (name, expected, actual)
                )
317
317
318
318
try:
    # Python 2 ships a builtin buffer type; reuse it when it exists
    buffer = buffer  # pytype: disable=name-error
except NameError:

    def buffer(sliceable, offset=0, length=None):
        # Python 3 fallback: memoryview gives the same zero-copy slicing
        if length is not None:
            return memoryview(sliceable)[offset : offset + length]
        return memoryview(sliceable)[offset:]


# default amount read from the OS by bufferedinputpipe._fillbuffer
_chunksize = 4096
330
330
331
331
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []  # pending byte chunks, oldest first
        self._eof = False  # set once os.read() returns b''
        self._lenbuf = 0  # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling the buffer until we have 'size' bytes or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        # issue at most one OS read, then serve whatever is buffered;
        # never blocks waiting for the full 'size' bytes
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the most recent chunk (-1 if none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            # collapse all chunks so a single slice suffices
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), size)
        if not data:
            # an empty read marks end-of-file on the underlying pipe
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
440
440
441
441
def mmapread(fp, size=None):
    """Return a read-only mmap over the first ``size`` bytes of ``fp``.

    ``size=None`` maps the whole file. A ``size`` of exactly 0 returns
    b'' immediately, and empty files (which cannot be mmapped) also yield
    b'' instead of raising.
    """
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    if size is None:
        size = 0
    try:
        # accept either a file object or a raw file descriptor
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
458
458
459
459
460 class fileobjectproxy(object):
460 class fileobjectproxy(object):
461 """A proxy around file objects that tells a watcher when events occur.
461 """A proxy around file objects that tells a watcher when events occur.
462
462
463 This type is intended to only be used for testing purposes. Think hard
463 This type is intended to only be used for testing purposes. Think hard
464 before using it in important code.
464 before using it in important code.
465 """
465 """
466
466
467 __slots__ = (
467 __slots__ = (
468 '_orig',
468 '_orig',
469 '_observer',
469 '_observer',
470 )
470 )
471
471
472 def __init__(self, fh, observer):
472 def __init__(self, fh, observer):
473 object.__setattr__(self, '_orig', fh)
473 object.__setattr__(self, '_orig', fh)
474 object.__setattr__(self, '_observer', observer)
474 object.__setattr__(self, '_observer', observer)
475
475
476 def __getattribute__(self, name):
476 def __getattribute__(self, name):
477 ours = {
477 ours = {
478 '_observer',
478 '_observer',
479 # IOBase
479 # IOBase
480 'close',
480 'close',
481 # closed if a property
481 # closed if a property
482 'fileno',
482 'fileno',
483 'flush',
483 'flush',
484 'isatty',
484 'isatty',
485 'readable',
485 'readable',
486 'readline',
486 'readline',
487 'readlines',
487 'readlines',
488 'seek',
488 'seek',
489 'seekable',
489 'seekable',
490 'tell',
490 'tell',
491 'truncate',
491 'truncate',
492 'writable',
492 'writable',
493 'writelines',
493 'writelines',
494 # RawIOBase
494 # RawIOBase
495 'read',
495 'read',
496 'readall',
496 'readall',
497 'readinto',
497 'readinto',
498 'write',
498 'write',
499 # BufferedIOBase
499 # BufferedIOBase
500 # raw is a property
500 # raw is a property
501 'detach',
501 'detach',
502 # read defined above
502 # read defined above
503 'read1',
503 'read1',
504 # readinto defined above
504 # readinto defined above
505 # write defined above
505 # write defined above
506 }
506 }
507
507
508 # We only observe some methods.
508 # We only observe some methods.
509 if name in ours:
509 if name in ours:
510 return object.__getattribute__(self, name)
510 return object.__getattribute__(self, name)
511
511
512 return getattr(object.__getattribute__(self, '_orig'), name)
512 return getattr(object.__getattribute__(self, '_orig'), name)
513
513
514 def __nonzero__(self):
514 def __nonzero__(self):
515 return bool(object.__getattribute__(self, '_orig'))
515 return bool(object.__getattribute__(self, '_orig'))
516
516
517 __bool__ = __nonzero__
517 __bool__ = __nonzero__
518
518
519 def __delattr__(self, name):
519 def __delattr__(self, name):
520 return delattr(object.__getattribute__(self, '_orig'), name)
520 return delattr(object.__getattribute__(self, '_orig'), name)
521
521
522 def __setattr__(self, name, value):
522 def __setattr__(self, name, value):
523 return setattr(object.__getattribute__(self, '_orig'), name, value)
523 return setattr(object.__getattribute__(self, '_orig'), name, value)
524
524
525 def __iter__(self):
525 def __iter__(self):
526 return object.__getattribute__(self, '_orig').__iter__()
526 return object.__getattribute__(self, '_orig').__iter__()
527
527
528 def _observedcall(self, name, *args, **kwargs):
528 def _observedcall(self, name, *args, **kwargs):
529 # Call the original object.
529 # Call the original object.
530 orig = object.__getattribute__(self, '_orig')
530 orig = object.__getattribute__(self, '_orig')
531 res = getattr(orig, name)(*args, **kwargs)
531 res = getattr(orig, name)(*args, **kwargs)
532
532
533 # Call a method on the observer of the same name with arguments
533 # Call a method on the observer of the same name with arguments
534 # so it can react, log, etc.
534 # so it can react, log, etc.
535 observer = object.__getattribute__(self, '_observer')
535 observer = object.__getattribute__(self, '_observer')
536 fn = getattr(observer, name, None)
536 fn = getattr(observer, name, None)
537 if fn:
537 if fn:
538 fn(res, *args, **kwargs)
538 fn(res, *args, **kwargs)
539
539
540 return res
540 return res
541
541
542 def close(self, *args, **kwargs):
542 def close(self, *args, **kwargs):
543 return object.__getattribute__(self, '_observedcall')(
543 return object.__getattribute__(self, '_observedcall')(
544 'close', *args, **kwargs
544 'close', *args, **kwargs
545 )
545 )
546
546
547 def fileno(self, *args, **kwargs):
547 def fileno(self, *args, **kwargs):
548 return object.__getattribute__(self, '_observedcall')(
548 return object.__getattribute__(self, '_observedcall')(
549 'fileno', *args, **kwargs
549 'fileno', *args, **kwargs
550 )
550 )
551
551
552 def flush(self, *args, **kwargs):
552 def flush(self, *args, **kwargs):
553 return object.__getattribute__(self, '_observedcall')(
553 return object.__getattribute__(self, '_observedcall')(
554 'flush', *args, **kwargs
554 'flush', *args, **kwargs
555 )
555 )
556
556
557 def isatty(self, *args, **kwargs):
557 def isatty(self, *args, **kwargs):
558 return object.__getattribute__(self, '_observedcall')(
558 return object.__getattribute__(self, '_observedcall')(
559 'isatty', *args, **kwargs
559 'isatty', *args, **kwargs
560 )
560 )
561
561
562 def readable(self, *args, **kwargs):
562 def readable(self, *args, **kwargs):
563 return object.__getattribute__(self, '_observedcall')(
563 return object.__getattribute__(self, '_observedcall')(
564 'readable', *args, **kwargs
564 'readable', *args, **kwargs
565 )
565 )
566
566
567 def readline(self, *args, **kwargs):
567 def readline(self, *args, **kwargs):
568 return object.__getattribute__(self, '_observedcall')(
568 return object.__getattribute__(self, '_observedcall')(
569 'readline', *args, **kwargs
569 'readline', *args, **kwargs
570 )
570 )
571
571
572 def readlines(self, *args, **kwargs):
572 def readlines(self, *args, **kwargs):
573 return object.__getattribute__(self, '_observedcall')(
573 return object.__getattribute__(self, '_observedcall')(
574 'readlines', *args, **kwargs
574 'readlines', *args, **kwargs
575 )
575 )
576
576
577 def seek(self, *args, **kwargs):
577 def seek(self, *args, **kwargs):
578 return object.__getattribute__(self, '_observedcall')(
578 return object.__getattribute__(self, '_observedcall')(
579 'seek', *args, **kwargs
579 'seek', *args, **kwargs
580 )
580 )
581
581
582 def seekable(self, *args, **kwargs):
582 def seekable(self, *args, **kwargs):
583 return object.__getattribute__(self, '_observedcall')(
583 return object.__getattribute__(self, '_observedcall')(
584 'seekable', *args, **kwargs
584 'seekable', *args, **kwargs
585 )
585 )
586
586
587 def tell(self, *args, **kwargs):
587 def tell(self, *args, **kwargs):
588 return object.__getattribute__(self, '_observedcall')(
588 return object.__getattribute__(self, '_observedcall')(
589 'tell', *args, **kwargs
589 'tell', *args, **kwargs
590 )
590 )
591
591
592 def truncate(self, *args, **kwargs):
592 def truncate(self, *args, **kwargs):
593 return object.__getattribute__(self, '_observedcall')(
593 return object.__getattribute__(self, '_observedcall')(
594 'truncate', *args, **kwargs
594 'truncate', *args, **kwargs
595 )
595 )
596
596
597 def writable(self, *args, **kwargs):
597 def writable(self, *args, **kwargs):
598 return object.__getattribute__(self, '_observedcall')(
598 return object.__getattribute__(self, '_observedcall')(
599 'writable', *args, **kwargs
599 'writable', *args, **kwargs
600 )
600 )
601
601
602 def writelines(self, *args, **kwargs):
602 def writelines(self, *args, **kwargs):
603 return object.__getattribute__(self, '_observedcall')(
603 return object.__getattribute__(self, '_observedcall')(
604 'writelines', *args, **kwargs
604 'writelines', *args, **kwargs
605 )
605 )
606
606
607 def read(self, *args, **kwargs):
607 def read(self, *args, **kwargs):
608 return object.__getattribute__(self, '_observedcall')(
608 return object.__getattribute__(self, '_observedcall')(
609 'read', *args, **kwargs
609 'read', *args, **kwargs
610 )
610 )
611
611
612 def readall(self, *args, **kwargs):
612 def readall(self, *args, **kwargs):
613 return object.__getattribute__(self, '_observedcall')(
613 return object.__getattribute__(self, '_observedcall')(
614 'readall', *args, **kwargs
614 'readall', *args, **kwargs
615 )
615 )
616
616
617 def readinto(self, *args, **kwargs):
617 def readinto(self, *args, **kwargs):
618 return object.__getattribute__(self, '_observedcall')(
618 return object.__getattribute__(self, '_observedcall')(
619 'readinto', *args, **kwargs
619 'readinto', *args, **kwargs
620 )
620 )
621
621
622 def write(self, *args, **kwargs):
622 def write(self, *args, **kwargs):
623 return object.__getattribute__(self, '_observedcall')(
623 return object.__getattribute__(self, '_observedcall')(
624 'write', *args, **kwargs
624 'write', *args, **kwargs
625 )
625 )
626
626
627 def detach(self, *args, **kwargs):
627 def detach(self, *args, **kwargs):
628 return object.__getattribute__(self, '_observedcall')(
628 return object.__getattribute__(self, '_observedcall')(
629 'detach', *args, **kwargs
629 'detach', *args, **kwargs
630 )
630 )
631
631
632 def read1(self, *args, **kwargs):
632 def read1(self, *args, **kwargs):
633 return object.__getattribute__(self, '_observedcall')(
633 return object.__getattribute__(self, '_observedcall')(
634 'read1', *args, **kwargs
634 'read1', *args, **kwargs
635 )
635 )
636
636
637
637
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _notify(self, event, *args):
        # Forward ``event`` to the observer attached to our input pipe,
        # if the observer defines a handler for it. Distinct event names
        # are used because the operation happens on us, not on the
        # underlying file object.
        fn = getattr(self._input._observer, event, None)
        if fn:
            fn(*args)

    def _fillbuffer(self):
        # Publish the low-level os.read() that refilled the buffer.
        res = super(observedbufferedinputpipe, self)._fillbuffer()
        self._notify('osread', res, _chunksize)
        return res

    def read(self, size):
        # Publish buffered read()s under their own event name.
        res = super(observedbufferedinputpipe, self).read(size)
        self._notify('bufferedread', res, size)
        return res

    def readline(self, *args, **kwargs):
        # Publish buffered readline()s under their own event name.
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
        self._notify('bufferedreadline', res)
        return res
678
678
679
679
# Names of the socket methods intercepted by ``socketproxy`` below so the
# observer can be notified; every other attribute is delegated untouched to
# the wrapped socket.
PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}


class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        # Use object.__setattr__ to bypass our own __setattr__, which
        # forwards attribute writes to the wrapped socket.
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # Observed methods resolve on the proxy itself; everything else is
        # delegated to the wrapped socket.
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        """Invoke ``name`` on the wrapped socket, then notify the observer.

        The observer hook of the same name (if any) receives the call's
        result followed by the original arguments.
        """
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        # Fixed: this previously forwarded the misspelled name 'recv_info',
        # which does not exist on sockets, so any recv_into() through the
        # proxy raised AttributeError.
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )
817
817
818
818
class baseproxyobserver(object):
    """Shared plumbing for the proxy observers defined below.

    Holds the log destination plus formatting flags and implements
    ``_writedata()``, which renders an observed payload into the log.
    """

    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh  # file object the log is written to
        self.name = name  # label prefixed to log lines
        self.logdata = logdata  # whether payload bytes are logged
        self.logdataapis = logdataapis  # whether API call lines are logged

    def _writedata(self, data):
        """Append ``data`` to the log according to the formatting flags."""
        fh = self.fh

        if not self.logdata:
            # Payload logging is disabled; just terminate any pending
            # API line.
            if self.logdataapis:
                fh.write(b'\n')
                fh.flush()
            return

        if b'\n' not in data:
            # Simple case: all the data fits on a single log line.
            if self.logdataapis:
                fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                fh.write(b'%s> %s\n' % (self.name, stringutil.escapestr(data)))
            fh.flush()
            return

        # Data containing newlines is spread over multiple log lines.
        if self.logdataapis:
            fh.write(b':\n')

        for line in data.splitlines(True):
            fh.write(b'%s> %s\n' % (self.name, stringutil.escapestr(line)))
        fh.flush()
854
854
855
855
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        """Record which event categories (reads/writes) should be logged."""
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        """Log a ``read(size)`` call that returned ``res``."""
        if not self.reads:
            return

        # Python 3 can return None from reads at EOF instead of empty strings.
        res = b'' if res is None else res

        # read(-1) calls that return nothing happen constantly on Python 3;
        # skip them so Python 2 and Python 3 logs stay comparable.
        if res == b'' and size == -1:
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
        self._writedata(res)

    def readline(self, res, limit=-1):
        """Log a ``readline()`` call that returned ``res``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
        self._writedata(res)

    def readinto(self, res, dest):
        """Log a ``readinto(dest)`` call that returned ``res`` bytes."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        data = b'' if res is None else dest[0:res]

        # memoryview slices yield ints on Python 3, which confuses the "in"
        # operator used by _writedata(); flatten to bytes first.
        if isinstance(data, memoryview):
            data = data.tobytes()
        self._writedata(data)

    def write(self, res, data):
        """Log a ``write(data)`` call that returned ``res``."""
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written; normalize to the byte count.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
        self._writedata(data)

    def flush(self, res):
        """Log a ``flush()`` call."""
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # The two methods below are invoked by observedbufferedinputpipe rather
    # than by a proxied file object, hence the distinct event names.
    def bufferedread(self, res, size):
        """Log a buffered ``read(size)`` that returned ``res``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )
        self._writedata(res)

    def bufferedreadline(self, res):
        """Log a buffered ``readline()`` that returned ``res``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )
        self._writedata(res)
954
954
955
955
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object.

    Activity on ``fh`` is written to ``logh``, prefixed with ``name``; the
    remaining flags select what gets logged.
    """
    watcher = fileobjectobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return fileobjectproxy(fh, watcher)
970
970
971
971
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states  # log state changes (timeouts, blocking, ...)

    def makefile(self, res, mode=None, bufsize=None):
        """Log a ``makefile()`` call."""
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        """Log a ``recv()`` call that returned ``res``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        """Log a ``recvfrom()`` call; ``res`` is a (data, address) pair."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        """Log a ``recvfrom_into()`` call; ``res`` is (nbytes, address)."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        """Log a ``recv_into()`` call that read ``res`` bytes into ``buf``."""
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        """Log a ``send()`` call that sent ``res`` bytes of ``data``."""
        if not self.writes:
            return

        if self.logdataapis:
            # socket.send() returns the number of bytes sent as an int, so
            # log ``res`` directly; the previous ``len(res)`` raised
            # TypeError whenever a send was observed. The ``logdataapis``
            # guard matches every other data-logging method of this class.
            self.fh.write(
                b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, res)
            )

        self._writedata(data)

    def sendall(self, res, data, flags=0):
        """Log a ``sendall()`` call."""
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        """Log a ``sendto()`` call; flags are optional in the signature."""
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        """Log a ``setblocking()`` state change."""
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        """Log a ``settimeout()`` state change."""
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        """Log a ``gettimeout()`` query."""
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        """Log a ``setsockopt()`` state change."""
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
1105
1105
1106
1106
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket.

    Activity on ``fh`` is written to ``logh``, prefixed with ``name``; the
    remaining flags select what gets logged.
    """
    watcher = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, watcher)
1129
1129
1130
1130
def version():
    """Return version information if available.

    The version module is generated at build time, so it can be missing
    (e.g. in some source checkouts); fall back to ``b'unknown'`` then.
    """
    try:
        from . import __version__
    except ImportError:
        return b'unknown'
    return __version__.version
1139
1139
1140
1140
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()

    # Up to three dotted numeric components, an optional '+'/'-' separator,
    # then everything else as the "extra" tail.
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if m is None:
        vparts, extra = b'', v
    else:
        vparts = m.group(1)
        extra = m.group(2) or None

    assert vparts is not None  # help pytype

    vints = []
    for piece in vparts.split(b'.'):
        try:
            vints.append(int(piece))
        except ValueError:
            break
    # Pad so (3, 6) becomes (3, 6, None).
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    elif n == 3:
        return (vints[0], vints[1], vints[2])
    elif n == 4:
        return (vints[0], vints[1], vints[2], extra)
1224
1224
1225
1225
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument case: memoize the single result in a one-slot list
        slot = []

        def f():
            if not slot:
                slot.append(func())
            return slot[0]

        return f

    memo = {}
    if argcount == 1:
        # single-argument case: key on the argument itself, which avoids
        # the cost of packing/unpacking an argument tuple on every call
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]

    else:

        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
1255
1255
1256
1256
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if not pending:
            # nobody shares this object; it is safe to write in place
            return self
        self._copied = pending - 1
        # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
        return self.__class__(self)  # pytype: disable=wrong-arg-count

    def copy(self):
        """always do a cheap copy"""
        # just bump the share count; the real copy is deferred to
        # preparewrite()
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1275
1275
1276
1276
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        # drop any previous binding first so a re-set key moves to the end
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = pycompat.iteritems(src)
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        """Insert ``key``/``value`` at index ``position``."""
        for i, (k, v) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                # re-append the displaced entries behind the new one
                del self[k]
                self[k] = v
1314
1314
1315
1315
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1341
1341
1342
1342
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1348
1348
1349
1349
class transactional(object):  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        clean_exit = exc_type is None
        try:
            # only commit when the body finished without raising
            if clean_exit:
                self.close()
        finally:
            # release unconditionally, even if close() itself raised
            self.release()
1375
1375
1376
1376
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # an intervention request is not a failure: close (commit) the
        # transaction before propagating so work done so far is kept
        tr.close()
        raise
    finally:
        tr.release()
1394
1394
1395
1395
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """No-op context manager that simply hands back ``enter_result``."""
    yield enter_result
1399
1399
1400
1400
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # a fresh node is its own neighbour: a one-element circular list
        self.next = self
        self.prev = self
        # start out with no payload
        self.markempty()

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1423
1423
1424
1424
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new code. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        # The linked list starts as a single empty node; capacity is added
        # lazily by _addcapacity() as items are inserted.
        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        # Remove key ``k`` and return its value; ``default`` when missing
        # (raises KeyError when missing and no default was given).
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        # Empty every node in place; the nodes themselves are kept for
        # reuse rather than unlinked from the list.
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1723
1723
1724
1724
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # single-argument case avoids packing/unpacking an args tuple
        def f(arg):
            if arg in cache:
                # cache hit: refresh the key's recency
                order.remove(arg)
            else:
                if len(cache) > 20:
                    # evict the least recently used entry
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1754
1754
1755
1755
class propertycache(object):
    """Descriptor that caches the wrapped function's result on the instance.

    The first access runs ``func`` and stores the result in the instance
    ``__dict__`` under the same name, so later lookups find the plain
    attribute instead of invoking the descriptor again.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1769
1769
1770
1770
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # attribute names live as native str in __dict__
    prop = pycompat.sysstr(prop)
    # silently a no-op when the property was never computed
    obj.__dict__.pop(prop, None)
1776
1776
1777
1777
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def _floorlog2(x):
        # position of the highest set bit; 0 when x is 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1 if bits else 0

    pending = []
    size = 0
    for chunk in source:
        pending.append(chunk)
        size += len(chunk)
        if size < min:
            continue
        if min < max:
            # double the threshold, or jump straight to the power of two
            # below what we actually accumulated, capped at max
            min = min << 1
            rounded = 1 << _floorlog2(size)
            if rounded > min:
                min = rounded
            if min > max:
                min = max
        yield b''.join(pending)
        pending = []
        size = 0
    if pending:
        yield b''.join(pending)
1809
1809
1810
1810
def always(fn):
    """Predicate that accepts any input."""
    return True
1813
1813
1814
1814
def never(fn):
    """Predicate that rejects any input."""
    return False
1817
1817
1818
1818
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        was_enabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only restore collection if it was on when we started
            if was_enabled:
                gc.enable()

    return wrapper
1843
1843
1844
1844
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a no-op there
    def nogc(func):
        return func
1848
1848
1849
1849
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to absolute
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    src = splitpath(n1)
    dst = n2.split(b'/')
    # find the length of the shared leading component run
    common = 0
    limit = min(len(src), len(dst))
    while common < limit and src[common] == dst[common]:
        common += 1
    # climb out of what remains of src, then descend into dst
    up = [b'..'] * (len(src) - common)
    return pycompat.ossep.join(up + dst[common:]) or b'.'
1876
1876
1877
1877
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback exactly `depth` frames deep means the TypeError
            # came from the call itself, i.e. a mismatched signature
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) != depth:
                raise
            raise error.SignatureError

    return check
1890
1890
1891
1891
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1908
1908
1909
1909
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the old stat so an mtime ambiguous with the
            # replaced file can be detected after the copy below
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass  # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one:
                    # advance mtime by one second (clamped to 31 bits)
                    # so the two are distinguishable
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1
                    ) & 0x7FFFFFFF
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
1963
1963
1964
1964
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was still in
    effect at the end, and how many files were copied/linked.
    """
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinking only makes sense on a single device
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # the recursive call may flip hardlink to False on failure;
            # later entries then reuse the updated setting
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; copy instead and stop trying to link
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
2006
2006
2007
2007
# DOS device names that Windows reserves regardless of extension
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# characters that may not appear in Windows file names
_winreservedchars = b':*?"<>|'
2033
2033
2034
2034
def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # examine each path component, treating both separator styles alike
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            # empty component from consecutive or leading separators
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                # control characters are invalid in Windows file names
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # the part before the first dot must not be a reserved device name
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        # components may not end in '.' or ' ' (but '.' and '..' are fine)
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2095
2095
2096
2096
# best available monotonic-ish timer; perf_counter exists on Python >= 3.3
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        # pre-3.3 fallback: time.clock has the best resolution on Windows
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        # pre-3.3 fallback for POSIX platforms
        timer = time.time
2108
2108
2109
2109
def makelock(info, pathname):
    """Write ``info`` into the lock file ``pathname``, atomically if possible.

    A symlink whose target encodes ``info`` is the atomic primitive.  When
    symlinks are unavailable, fall back to exclusively creating a regular
    file, which may leave a stale lock file behind if a signal interrupts
    the write.
    """
    try:
        return os.symlink(info, pathname)
    except AttributeError:  # platform without os.symlink
        pass
    except OSError as err:
        # an existing lock is fatal; any other symlink failure falls
        # through to the regular-file strategy below
        if err.errno == errno.EEXIST:
            raise

    binmode = getattr(os, 'O_BINARY', 0)
    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL | binmode)
    os.write(fd, info)
    os.close(fd)
2128
2128
2129
2129
def readlock(pathname):
    # type: (bytes) -> bytes
    """Return the contents of the lock file at ``pathname``.

    Prefers the symlink target written by makelock(); falls back to
    reading a regular file when symlinks are unsupported or the path is
    not a symlink.
    """
    try:
        return readlink(pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as err:
        # EINVAL/ENOSYS mean "not a symlink here"; anything else is real
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
2141
2141
2142
2142
def fstat(fp):
    """stat a file object, even one that lacks a working fileno().

    Objects without fileno() are stat()ed via their ``name`` attribute
    instead.
    """
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
2149
2149
2150
2150
2151 # File system features
2151 # File system features
2152
2152
2153
2153
def fscasesensitive(path):
    # type: (bytes) -> bool
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirpart, base = os.path.split(path)
    # derive a case-swapped sibling name to probe with
    swapped = base.upper()
    if swapped == base:
        swapped = base.lower()
    if swapped == base:
        # nothing to fold: no evidence against case sensitivity
        return True
    try:
        other = os.lstat(os.path.join(dirpart, swapped))
    except OSError:
        # the swapped name doesn't resolve: the filesystem distinguishes case
        return True
    # identical stat results mean both names hit the same entry
    return other != st
2177
2177
2178
2178
# transform applied to patterns/inputs before handing them to re2; the
# identity by default, swapped for pycompat.sysstr when the installed
# binding turns out to want native str (see _re._checkre2)
_re2_input = lambda x: x
try:
    import re2  # pytype: disable=import-error

    # None means "importable but not yet verified to work"; the probe in
    # _re._checkre2 resolves this to True or False on first use
    _re2 = None
except ImportError:
    _re2 = False
2186
2186
2187
2187
class _re(object):
    def _checkre2(self):
        """Probe the re2 module once and record whether it is usable."""
        global _re2
        global _re2_input

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes no flags argument; encode the supported ones inline
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(_re2_input(pat))
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to re below
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2241
2241
2242
2242 re = _re()
# module-level facade over the re module that prefers re2 when usable
re = _re()

# maps directory path -> {normcase(entry): entry}, populated by fspath()
_fspathcache = {}
2246
2246
2247
2247 def fspath(name, root):
def fspath(name, root):
    # type: (bytes, bytes) -> bytes
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        # map each entry's folded form back to its on-disk spelling
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes, which are regex metacharacters inside the
    # character class below.  (bytes.replace returns a new object, so the
    # result must be assigned — the previous code discarded it, leaving
    # b'\\' unescaped in the pattern on Windows.)
    seps = seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
2290
2291
2291
2292
2292 def checknlink(testfile):
def checknlink(testfile):
    # type: (bytes) -> bool
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = f2 = fp = None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        # best-effort removal of both scratch files
        for scratch in (f1, f2):
            if scratch is None:
                continue
            try:
                os.unlink(scratch)
            except OSError:
                pass
2324
2325
2325
2326
2326 def endswithsep(path):
def endswithsep(path):
    # type: (bytes) -> bool
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    alt = pycompat.osaltsep
    return bool(alt and path.endswith(alt))
2334
2335
2335
2336
2336 def splitpath(path):
def splitpath(path):
    # type: (bytes) -> List[bytes]
    """Split ``path`` on os.sep only.

    os.altsep is deliberately ignored: this is just a spelled-out
    alternative to ``path.split(os.sep)``.  Run os.path.normpath() on the
    input first if alternate separators may be present."""
    sep = pycompat.ossep
    return path.split(sep)
2344
2345
2345
2346
def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source doesn't exist: the empty temp file is the result
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        # don't leave a partially written temp file behind on failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2387
2388
2388
2389
2389 class filestat(object):
2390 class filestat(object):
2390 """help to exactly detect change of a file
2391 """help to exactly detect change of a file
2391
2392
2392 'stat' attribute is result of 'os.stat()' if specified 'path'
2393 'stat' attribute is result of 'os.stat()' if specified 'path'
2393 exists. Otherwise, it is None. This can avoid preparative
2394 exists. Otherwise, it is None. This can avoid preparative
2394 'exists()' examination on client side of this class.
2395 'exists()' examination on client side of this class.
2395 """
2396 """
2396
2397
2397 def __init__(self, stat):
2398 def __init__(self, stat):
2398 self.stat = stat
2399 self.stat = stat
2399
2400
2400 @classmethod
2401 @classmethod
2401 def frompath(cls, path):
2402 def frompath(cls, path):
2402 try:
2403 try:
2403 stat = os.stat(path)
2404 stat = os.stat(path)
2404 except OSError as err:
2405 except OSError as err:
2405 if err.errno != errno.ENOENT:
2406 if err.errno != errno.ENOENT:
2406 raise
2407 raise
2407 stat = None
2408 stat = None
2408 return cls(stat)
2409 return cls(stat)
2409
2410
2410 @classmethod
2411 @classmethod
2411 def fromfp(cls, fp):
2412 def fromfp(cls, fp):
2412 stat = os.fstat(fp.fileno())
2413 stat = os.fstat(fp.fileno())
2413 return cls(stat)
2414 return cls(stat)
2414
2415
2415 __hash__ = object.__hash__
2416 __hash__ = object.__hash__
2416
2417
2417 def __eq__(self, old):
2418 def __eq__(self, old):
2418 try:
2419 try:
2419 # if ambiguity between stat of new and old file is
2420 # if ambiguity between stat of new and old file is
2420 # avoided, comparison of size, ctime and mtime is enough
2421 # avoided, comparison of size, ctime and mtime is enough
2421 # to exactly detect change of a file regardless of platform
2422 # to exactly detect change of a file regardless of platform
2422 return (
2423 return (
2423 self.stat.st_size == old.stat.st_size
2424 self.stat.st_size == old.stat.st_size
2424 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2425 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2425 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2426 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2426 )
2427 )
2427 except AttributeError:
2428 except AttributeError:
2428 pass
2429 pass
2429 try:
2430 try:
2430 return self.stat is None and old.stat is None
2431 return self.stat is None and old.stat is None
2431 except AttributeError:
2432 except AttributeError:
2432 return False
2433 return False
2433
2434
2434 def isambig(self, old):
2435 def isambig(self, old):
2435 """Examine whether new (= self) stat is ambiguous against old one
2436 """Examine whether new (= self) stat is ambiguous against old one
2436
2437
2437 "S[N]" below means stat of a file at N-th change:
2438 "S[N]" below means stat of a file at N-th change:
2438
2439
2439 - S[n-1].ctime < S[n].ctime: can detect change of a file
2440 - S[n-1].ctime < S[n].ctime: can detect change of a file
2440 - S[n-1].ctime == S[n].ctime
2441 - S[n-1].ctime == S[n].ctime
2441 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2442 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2442 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2443 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2443 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2444 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2444 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2445 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2445
2446
2446 Case (*2) above means that a file was changed twice or more at
2447 Case (*2) above means that a file was changed twice or more at
2447 same time in sec (= S[n-1].ctime), and comparison of timestamp
2448 same time in sec (= S[n-1].ctime), and comparison of timestamp
2448 is ambiguous.
2449 is ambiguous.
2449
2450
2450 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2451 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2451 timestamp is ambiguous".
2452 timestamp is ambiguous".
2452
2453
2453 But advancing mtime only in case (*2) doesn't work as
2454 But advancing mtime only in case (*2) doesn't work as
2454 expected, because naturally advanced S[n].mtime in case (*1)
2455 expected, because naturally advanced S[n].mtime in case (*1)
2455 might be equal to manually advanced S[n-1 or earlier].mtime.
2456 might be equal to manually advanced S[n-1 or earlier].mtime.
2456
2457
2457 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2458 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2458 treated as ambiguous regardless of mtime, to avoid overlooking
2459 treated as ambiguous regardless of mtime, to avoid overlooking
2459 by confliction between such mtime.
2460 by confliction between such mtime.
2460
2461
2461 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2462 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2462 S[n].mtime", even if size of a file isn't changed.
2463 S[n].mtime", even if size of a file isn't changed.
2463 """
2464 """
2464 try:
2465 try:
2465 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2466 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2466 except AttributeError:
2467 except AttributeError:
2467 return False
2468 return False
2468
2469
2469 def avoidambig(self, path, old):
2470 def avoidambig(self, path, old):
2470 """Change file stat of specified path to avoid ambiguity
2471 """Change file stat of specified path to avoid ambiguity
2471
2472
2472 'old' should be previous filestat of 'path'.
2473 'old' should be previous filestat of 'path'.
2473
2474
2474 This skips avoiding ambiguity, if a process doesn't have
2475 This skips avoiding ambiguity, if a process doesn't have
2475 appropriate privileges for 'path'. This returns False in this
2476 appropriate privileges for 'path'. This returns False in this
2476 case.
2477 case.
2477
2478
2478 Otherwise, this returns True, as "ambiguity is avoided".
2479 Otherwise, this returns True, as "ambiguity is avoided".
2479 """
2480 """
2480 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2481 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2481 try:
2482 try:
2482 os.utime(path, (advanced, advanced))
2483 os.utime(path, (advanced, advanced))
2483 except OSError as inst:
2484 except OSError as inst:
2484 if inst.errno == errno.EPERM:
2485 if inst.errno == errno.EPERM:
2485 # utime() on the file created by another user causes EPERM,
2486 # utime() on the file created by another user causes EPERM,
2486 # if a process doesn't have appropriate privileges
2487 # if a process doesn't have appropriate privileges
2487 return False
2488 return False
2488 raise
2489 raise
2489 return True
2490 return True
2490
2491
2491 def __ne__(self, other):
2492 def __ne__(self, other):
2492 return not self == other
2493 return not self == other
2493
2494
2494
2495
class atomictempfile(object):
    """Writable file object that atomically replaces its target file.

    All writes go to a temporary sibling of the original file; close()
    renames the temporary copy over the original name, making every
    change visible at once.  Destroying the object without closing it
    discards everything that was written.

    The ``checkambig`` constructor flag cooperates with filestat and is
    only useful when the target file is guarded by some lock (e.g.
    repo.lock or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name of the target file
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # expose the underlying file's I/O methods directly
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        target = localpath(self.__name)
        # grab the old stat before the rename so ambiguity is judged
        # against the file that is being replaced
        oldstat = self._checkambig and filestat.frompath(target)
        rename(self._tempname, target)
        if oldstat and oldstat.stat:
            newstat = filestat.frompath(target)
            if newstat.isambig(oldstat):
                # stat of the changed file is ambiguous against the
                # original one; nudge mtime forward to disambiguate
                advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                os.utime(target, (advanced, advanced))

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is None:
            self.close()
        else:
            self.discard()
2563
2564
2564
2565
def unlinkpath(f, ignoremissing=False, rmdir=True):
    # type: (bytes, bool, bool) -> None
    """Unlink ``f`` and prune parent directories that become empty.

    With ``ignoremissing``, a nonexistent ``f`` is not an error; with
    ``rmdir`` (the default), now-empty ancestor directories are removed.
    """
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    if not rmdir:
        return
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2578
2579
2579
2580
def tryunlink(f):
    # type: (bytes) -> None
    """Remove ``f`` if it exists; a missing file (ENOENT) is ignored."""
    try:
        unlink(f)
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            return
        raise
2588
2589
2589
2590
def makedirs(name, mode=None, notindexed=False):
    # type: (bytes, Optional[int], bool) -> None
    """Recursively create directory ``name`` with parent mode inheritance.

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.  ``mode`` (when given) is applied to the
    final directory only, and only when it did not already exist.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already present: leave its mode alone
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create the ancestry first, then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # catch EEXIST to cope with concurrent creators
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2618
2619
2619
2620
def readfile(path):
    # type: (bytes) -> bytes
    """Return the entire content of the file at ``path`` as bytes."""
    with open(path, b'rb') as fh:
        data = fh.read()
    return data
2624
2625
2625
2626
def writefile(path, text):
    # type: (bytes, bytes) -> None
    """Create or overwrite the file at ``path`` with ``text`` (bytes)."""
    with open(path, b'wb') as fh:
        fh.write(text)
2630
2631
2631
2632
def appendfile(path, text):
    # type: (bytes, bytes) -> None
    """Append ``text`` (bytes) to the file at ``path``, creating it if needed."""
    with open(path, b'ab') as fh:
        fh.write(text)
2636
2637
2637
2638
class chunkbuffer(object):
    """Present an iterator over chunks of arbitrary size as a byte
    stream that supports efficient sized reads."""

    def __init__(self, in_iter):
        """``in_iter`` is the iterator producing the input chunks."""

        def splitbig(chunks):
            # Re-chunk anything larger than 1 MiB into 256 KiB pieces so
            # no single buffered chunk is excessively large.
            for piece in chunks:
                if len(piece) > 2 ** 20:
                    start = 0
                    while start < len(piece):
                        stop = start + 2 ** 18
                        yield piece[start:stop]
                        start = stop
                else:
                    yield piece

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        remaining = l
        pieces = []
        queue = self._queue
        while remaining > 0:
            # top the queue up with roughly 256 KiB of chunks
            if not queue:
                budget = 2 ** 18
                for piece in self.iter:
                    queue.append(piece)
                    budget -= len(piece)
                    if budget <= 0:
                        break
                if not queue:
                    break

            # Peek at the head chunk instead of popleft()/appendleft():
            # for partial reads that would cost two deque mutations plus
            # a fresh bytes object for the unread remainder.
            head = queue[0]
            headlen = len(head)
            off = self._chunkoffset

            # Whole chunk consumed.
            if off == 0 and remaining >= headlen:
                remaining -= headlen
                queue.popleft()
                pieces.append(head)
                # self._chunkoffset stays at 0.
                continue

            avail = headlen - off

            # Everything left in this chunk is consumed.
            if remaining >= avail:
                remaining -= avail
                queue.popleft()
                # off == 0 is handled by the branch above, so this never
                # degenerates into a plain copy via ``head[0:]``.
                pieces.append(head[off:])
                self._chunkoffset = 0

            # Only part of the chunk is needed.
            else:
                pieces.append(head[off : off + remaining])
                self._chunkoffset += remaining
                remaining -= avail

        return b''.join(pieces)
2719
2720
2720
2721
def filechunkiter(f, size=131072, limit=None):
    """Generate the data in file ``f`` in chunks of ``size`` bytes
    (default 131072), reading at most ``limit`` bytes in total (default
    is to read everything).  A chunk may be shorter than ``size`` when
    it is the last chunk of the file, or when ``f`` is a socket or some
    other kind of file that sometimes returns less data than requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        want = size if limit is None else min(limit, size)
        # short-circuit on want == 0 so we never issue a zero-byte read
        data = want and f.read(want)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
2741
2742
2742
2743
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Instances wrap a source file object and let callers read at most N
    bytes from it; any attempt to read past that cap behaves like EOF.

    The wrapped file must not be read through any other path while the
    proxy is in use, otherwise the internal byte accounting goes out of
    sync and the results are undefined.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        budget = self._left
        if not budget:
            return b''
        if n < 0:
            # negative n means "read everything still allowed"
            n = budget
        chunk = self._fh.read(min(n, budget))
        self._left = budget - len(chunk)
        assert self._left >= 0
        return chunk

    def readinto(self, b):
        chunk = self.read(len(b))
        if chunk is None:
            return None
        nread = len(chunk)
        b[0:nread] = chunk
        return nread
2780
2781
2781
2782
def unitcountfn(*unittable):
    """Return a function that renders a readable count of some quantity.

    ``unittable`` rows are (multiplier, divisor, format) triples tried in
    order; the first row whose threshold the magnitude reaches wins.
    """

    def render(count):
        for multiplier, divisor, fmt in unittable:
            if abs(count) >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the smallest (last) unit
        return unittable[-1][2] % count

    return render
2792
2793
2793
2794
def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Validate that the 1-based linerange <fromline>:<toline> makes
    sense and return it as a 0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    if toline < fromline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline
2815
2816
2816
2817
# Render a byte count as a short human-readable string, picking the
# largest unit (GB/MB/KB/bytes) whose threshold the value reaches and a
# precision that keeps roughly three significant digits.
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
2829
2830
2830
2831
class transformingwriter(object):
    """Writable file wrapper that pipes all data through ``encode``."""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def write(self, data):
        # transform first, then delegate; propagate the byte count the
        # underlying file reports
        return self._fp.write(self._encode(data))

    def flush(self):
        self._fp.flush()

    def close(self):
        self._fp.close()
2846
2847
2847
2848
# Matches a single EOL: either a CRLF (where any run of CRs collapses
# into the single LF) or a bare LF.  We do not care about old Macintosh
# files, so a stray CR with no following LF is an error.
_eolre = remod.compile(br'\r*\n')
2852
2853
2853
2854
def tolf(s):
    # type: (bytes) -> bytes
    """Return ``s`` with every line ending normalized to a bare LF."""
    return _eolre.sub(b'\n', s)
2857
2858
2858
2859
def tocrlf(s):
    # type: (bytes) -> bytes
    """Return ``s`` with every line ending normalized to CRLF."""
    return _eolre.sub(b'\r\n', s)
2862
2863
2863
2864
def _crlfwriter(fp):
    # Wrap ``fp`` so that written data has its line endings converted
    # to CRLF on the way out.
    return transformingwriter(fp, tocrlf)
2866
2867
2867
2868
# Select the EOL-conversion helpers that match the host platform's
# native line separator: real conversions on CRLF platforms (Windows),
# identity everywhere else.
if pycompat.oslinesep != b'\r\n':
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
else:
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
2876
2877
if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
    3,
    0,
):
    # Some CPython IO methods do not handle EINTR correctly.  The
    # following table shows which CPython versions (and functions) are
    # affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #             | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    #   [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Only fp.__iter__ needs a workaround here; the "read*" methods are
    # fine since Python < 2.7.4 is not supported.  The workaround is
    # slower: "for x in fp" is about 4x faster than iterating
    # fp.readline in CPython 2, because CPython 2 keeps an internal
    # readahead buffer for fp.__iter__ but not for the read* methods.
    #
    # On modern systems like Linux the "read" syscall is not interrupted
    # when reading "fast" on-disk files, so the EINTR issue only affects
    # pipes, sockets, ttys and the like.  Regular (S_ISREG) files are
    # treated as "fast" and take the fast (unsafe) path to minimize the
    # performance impact.

    def iterfile(fp):
        if type(fp) is file and not stat.S_ISREG(
            os.fstat(fp.fileno()).st_mode
        ):
            # fp.readline deals with EINTR correctly; iterate via it.
            return iter(fp.readline, b'')
        return fp


else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2921
2922
2922
2923
def iterlines(iterator):
    # type: (Iterator[bytes]) -> Iterator[bytes]
    """Yield each line (without its terminator) from every chunk
    produced by ``iterator``."""
    for block in iterator:
        for line in block.splitlines():
            yield line
2928
2929
2929
2930
def expandpath(path):
    # type: (bytes) -> bytes
    """Expand environment variables, then ``~`` constructs, in ``path``."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
2933
2934
2934
2935
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    if fn is None:
        fn = lambda value: value
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        # map the (possibly regex-escaped) prefix character to itself so a
        # doubled prefix renders as one literal prefix character
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(br'%s(%s)' % (prefix, patterns))
    # group() is the full match (prefix + key); [1:] strips the one-character
    # prefix before the mapping lookup
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2959
2960
2960
2961
def getport(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.getport`."""
    nouideprecwarn(
        b'getport(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.getport(*args, **kwargs)
2965
2966
2966
2967
def url(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.url`."""
    nouideprecwarn(
        b'url(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.url(*args, **kwargs)
2971
2972
2972
2973
def hasscheme(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.hasscheme`."""
    nouideprecwarn(
        b'hasscheme(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.hasscheme(*args, **kwargs)
2977
2978
2978
2979
def hasdriveletter(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.hasdriveletter`."""
    nouideprecwarn(
        b'hasdriveletter(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hasdriveletter(*args, **kwargs)
2983
2984
2984
2985
def urllocalpath(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.urllocalpath`."""
    nouideprecwarn(
        b'urllocalpath(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.urllocalpath(*args, **kwargs)
2989
2990
2990
2991
def checksafessh(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.checksafessh`."""
    nouideprecwarn(
        b'checksafessh(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.checksafessh(*args, **kwargs)
2995
2996
2996
2997
def hidepassword(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.hidepassword`."""
    nouideprecwarn(
        b'hidepassword(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hidepassword(*args, **kwargs)
3001
3002
3002
3003
def removeauth(*args, **kwargs):
    """Deprecated shim; use :func:`mercurial.utils.urlutil.removeauth`."""
    nouideprecwarn(
        b'removeauth(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.removeauth(*args, **kwargs)
3007
3008
3008
3009
# Human-readable formatter for a duration expressed in seconds.
# NOTE(review): each tuple is presumably (threshold, divisor, format) as
# consumed by unitcountfn(), scanned in order from seconds down to
# nanoseconds -- confirm against unitcountfn's definition.
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
3024
3025
3025
3026
@attr.s
class timedcmstats(object):
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # before the context exits, elapsed is still 0 and there is nothing
        # meaningful to format yet
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
3043
3044
3044
3045
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.
    """
    # bump the nesting counter so reentrant uses report their depth
    timedcm._nested += 1
    stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield stats
    finally:
        # record elapsed time and unwind the nesting counter even on error
        stats.elapsed = timer() - stats.start
        timedcm._nested -= 1
3063
3064
3064
3065
# depth counter used by timedcm() to report how deeply timing contexts nest
timedcm._nested = 0
3066
3067
3067
3068
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as stats:
            ret = func(*args, **kwargs)
        # indent by nesting level so nested timings read as a tree
        indent = b' ' * stats.level * 2
        procutil.stderr.write(
            b'%s%s: %s\n' % (indent, pycompat.bytestr(func.__name__), stats)
        )
        return ret

    return wrapper
3094
3095
3095
3096
# Suffix -> multiplier table used by sizetoint(). sizetoint() scans this
# tuple in order and takes the first suffix match, so the bare b'b' entry
# must stay last: otherwise b'2kb' would match b'b' first and then fail to
# parse the remaining b'2k' as a float.
_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)
3105
3106
3106
3107
def sizetoint(s):
    # type: (bytes) -> int
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    spec = s.strip().lower()
    try:
        # first suffix match wins; _sizeunits keeps b'b' last for this reason
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
3126
3127
3127
3128
class hooks(object):
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily on each invocation
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda entry: entry[0])
        return [fn(*args) for _source, fn in self._hooks]
3145
3146
3146
3147
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.

    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    """
    frames = traceback.extract_stack()[: -skip - 1]
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in frames
    ][-depth:]  # depth=0 keeps everything, since [-0:] == [0:]
    if not entries:
        return
    width = max(len(location) for location, _func in entries)
    for location, func in entries:
        if line is None:
            yield (width, location, func)
        else:
            yield line % (width, location, func)
3170
3171
3171
3172
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.

    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    if otherf:
        # flush the companion stream first so interleaved output stays ordered
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    for entry in getstackframes(skip + 1, depth=depth):
        f.write(prefix + entry)
    f.flush()
3192
3193
3193
3194
# convenient shortcut: a short alias for debugstacktrace while developing
dst = debugstacktrace
3196
3197
3197
3198
3198 def safename(f, tag, ctx, others=None):
3199 def safename(f, tag, ctx, others=None):
3199 """
3200 """
3200 Generate a name that it is safe to rename f to in the given context.
3201 Generate a name that it is safe to rename f to in the given context.
3201
3202
3202 f: filename to rename
3203 f: filename to rename
3203 tag: a string tag that will be included in the new name
3204 tag: a string tag that will be included in the new name
3204 ctx: a context, in which the new name must not exist
3205 ctx: a context, in which the new name must not exist
3205 others: a set of other filenames that the new name must not be in
3206 others: a set of other filenames that the new name must not be in
3206
3207
3207 Returns a file name of the form oldname~tag[~number] which does not exist
3208 Returns a file name of the form oldname~tag[~number] which does not exist
3208 in the provided context and is not in the set of other names.
3209 in the provided context and is not in the set of other names.
3209 """
3210 """
3210 if others is None:
3211 if others is None:
3211 others = set()
3212 others = set()
3212
3213
3213 fn = b'%s~%s' % (f, tag)
3214 fn = b'%s~%s' % (f, tag)
3214 if fn not in ctx and fn not in others:
3215 if fn not in ctx and fn not in others:
3215 return fn
3216 return fn
3216 for n in itertools.count(1):
3217 for n in itertools.count(1):
3217 fn = b'%s~%s~%s' % (f, tag, n)
3218 fn = b'%s~%s~%s' % (f, tag, n)
3218 if fn not in ctx and fn not in others:
3219 if fn not in ctx and fn not in others:
3219 return fn
3220 return fn
3220
3221
3221
3222
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) < n:
        # a short read means the stream ended early; surface it loudly
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(data), n)
        )
    return data
3231
3232
3232
3233
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    bits = value & 0x7F
    value >>= 7
    # accumulator was previously named ``bytes``, shadowing the builtin;
    # renamed to keep the builtin usable and the code unambiguous
    chunks = []
    while value:
        # continuation bytes carry the high bit
        chunks.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    # final byte has the high bit clear
    chunks.append(pycompat.bytechr(bits))

    return b''.join(chunks)
3268
3269
3269
3270
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    # each byte contributes 7 payload bits, least significant group first
    for shift in itertools.count(0, 7):
        octet = ord(readexactly(fh, 1))
        value |= (octet & 0x7F) << shift
        if not octet & 0x80:
            # high bit clear marks the final byte
            return value
3302
3303
3303
3304
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    """Temporarily initialize LC_CTYPE from the environment when it is 'C'.

    The previous LC_CTYPE setting is restored on exit in that case; when
    LC_CTYPE is already initialized to something else, nothing is changed.
    """
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc != 'C':
        # already initialized to something meaningful; leave it alone
        yield
        return
    try:
        try:
            locale.setlocale(locale.LC_CTYPE, '')
        except locale.Error:
            # The likely case is that the locale from the environment
            # variables is unknown.
            pass
        yield
    finally:
        locale.setlocale(locale.LC_CTYPE, oldloc)
3331
3332
3332
3333
def _estimatememory():
    # type: () -> Optional[int]
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # Structure, byref, sizeof and windll are defined by the ctypes
        # module itself; ctypes.wintypes only provides Windows data types,
        # so importing them from ctypes.wintypes raised ImportError when
        # this code path was actually exercised.
        from ctypes import (  # pytype: disable=import-error
            Structure,
            byref,
            c_long as DWORD,
            c_ulonglong as DWORDLONG,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        # dwLength must be set to the structure size before the call
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
General Comments 0
You need to be logged in to leave comments. Login now