##// END OF EJS Templates
util: avoid a leaked file descriptor in `util.makelock()` exceptional case
Matt Harbison -
r52781:f833ad92 default
parent child Browse files
Show More
@@ -1,3404 +1,3406
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import annotations
16 from __future__ import annotations
17
17
18 import abc
18 import abc
19 import collections
19 import collections
20 import contextlib
20 import contextlib
21 import errno
21 import errno
22 import gc
22 import gc
23 import hashlib
23 import hashlib
24 import io
24 import io
25 import itertools
25 import itertools
26 import locale
26 import locale
27 import mmap
27 import mmap
28 import os
28 import os
29 import pickle # provides util.pickle symbol
29 import pickle # provides util.pickle symbol
30 import re as remod
30 import re as remod
31 import shutil
31 import shutil
32 import stat
32 import stat
33 import sys
33 import sys
34 import time
34 import time
35 import traceback
35 import traceback
36 import typing
36 import typing
37 import warnings
37 import warnings
38
38
39 from typing import (
39 from typing import (
40 Any,
40 Any,
41 BinaryIO,
41 BinaryIO,
42 Callable,
42 Callable,
43 Iterable,
43 Iterable,
44 Iterator,
44 Iterator,
45 List,
45 List,
46 Optional,
46 Optional,
47 Tuple,
47 Tuple,
48 Type,
48 Type,
49 TypeVar,
49 TypeVar,
50 )
50 )
51
51
52 from .node import hex
52 from .node import hex
53 from .thirdparty import attr
53 from .thirdparty import attr
54
54
55 # Force pytype to use the non-vendored package
55 # Force pytype to use the non-vendored package
56 if typing.TYPE_CHECKING:
56 if typing.TYPE_CHECKING:
57 # noinspection PyPackageRequirements
57 # noinspection PyPackageRequirements
58 import attr
58 import attr
59
59
60 from .pycompat import (
60 from .pycompat import (
61 open,
61 open,
62 )
62 )
63 from hgdemandimport import tracing
63 from hgdemandimport import tracing
64 from . import (
64 from . import (
65 encoding,
65 encoding,
66 error,
66 error,
67 i18n,
67 i18n,
68 policy,
68 policy,
69 pycompat,
69 pycompat,
70 typelib,
70 typelib,
71 urllibcompat,
71 urllibcompat,
72 )
72 )
73 from .utils import (
73 from .utils import (
74 compression,
74 compression,
75 hashutil,
75 hashutil,
76 procutil,
76 procutil,
77 stringutil,
77 stringutil,
78 )
78 )
79
79
80 # keeps pyflakes happy
80 # keeps pyflakes happy
81 assert [
81 assert [
82 Iterable,
82 Iterable,
83 Iterator,
83 Iterator,
84 List,
84 List,
85 Optional,
85 Optional,
86 Tuple,
86 Tuple,
87 ]
87 ]
88
88
89
89
90 base85 = policy.importmod('base85')
90 base85 = policy.importmod('base85')
91 osutil = policy.importmod('osutil')
91 osutil = policy.importmod('osutil')
92
92
93 b85decode = base85.b85decode
93 b85decode = base85.b85decode
94 b85encode = base85.b85encode
94 b85encode = base85.b85encode
95
95
96 cookielib = pycompat.cookielib
96 cookielib = pycompat.cookielib
97 httplib = pycompat.httplib
97 httplib = pycompat.httplib
98 safehasattr = pycompat.safehasattr
98 safehasattr = pycompat.safehasattr
99 socketserver = pycompat.socketserver
99 socketserver = pycompat.socketserver
100 bytesio = io.BytesIO
100 bytesio = io.BytesIO
101 # TODO deprecate stringio name, as it is a lie on Python 3.
101 # TODO deprecate stringio name, as it is a lie on Python 3.
102 stringio = bytesio
102 stringio = bytesio
103 xmlrpclib = pycompat.xmlrpclib
103 xmlrpclib = pycompat.xmlrpclib
104
104
105 httpserver = urllibcompat.httpserver
105 httpserver = urllibcompat.httpserver
106 urlerr = urllibcompat.urlerr
106 urlerr = urllibcompat.urlerr
107 urlreq = urllibcompat.urlreq
107 urlreq = urllibcompat.urlreq
108
108
109 # workaround for win32mbcs
109 # workaround for win32mbcs
110 _filenamebytestr = pycompat.bytestr
110 _filenamebytestr = pycompat.bytestr
111
111
112 if pycompat.iswindows:
112 if pycompat.iswindows:
113 from . import windows as platform
113 from . import windows as platform
114 else:
114 else:
115 from . import posix as platform
115 from . import posix as platform
116
116
117 _ = i18n._
117 _ = i18n._
118
118
119 abspath = platform.abspath
119 abspath = platform.abspath
120 bindunixsocket = platform.bindunixsocket
120 bindunixsocket = platform.bindunixsocket
121 cachestat = platform.cachestat
121 cachestat = platform.cachestat
122 checkexec = platform.checkexec
122 checkexec = platform.checkexec
123 checklink = platform.checklink
123 checklink = platform.checklink
124 copymode = platform.copymode
124 copymode = platform.copymode
125 expandglobs = platform.expandglobs
125 expandglobs = platform.expandglobs
126 getfsmountpoint = platform.getfsmountpoint
126 getfsmountpoint = platform.getfsmountpoint
127 getfstype = platform.getfstype
127 getfstype = platform.getfstype
128 get_password = platform.get_password
128 get_password = platform.get_password
129 groupmembers = platform.groupmembers
129 groupmembers = platform.groupmembers
130 groupname = platform.groupname
130 groupname = platform.groupname
131 isexec = platform.isexec
131 isexec = platform.isexec
132 isowner = platform.isowner
132 isowner = platform.isowner
133 listdir = osutil.listdir
133 listdir = osutil.listdir
134 localpath = platform.localpath
134 localpath = platform.localpath
135 lookupreg = platform.lookupreg
135 lookupreg = platform.lookupreg
136 makedir = platform.makedir
136 makedir = platform.makedir
137 nlinks = platform.nlinks
137 nlinks = platform.nlinks
138 normpath = platform.normpath
138 normpath = platform.normpath
139 normcase = platform.normcase
139 normcase = platform.normcase
140 normcasespec = platform.normcasespec
140 normcasespec = platform.normcasespec
141 normcasefallback = platform.normcasefallback
141 normcasefallback = platform.normcasefallback
142 openhardlinks = platform.openhardlinks
142 openhardlinks = platform.openhardlinks
143 oslink = platform.oslink
143 oslink = platform.oslink
144 parsepatchoutput = platform.parsepatchoutput
144 parsepatchoutput = platform.parsepatchoutput
145 pconvert = platform.pconvert
145 pconvert = platform.pconvert
146 poll = platform.poll
146 poll = platform.poll
147 posixfile = platform.posixfile
147 posixfile = platform.posixfile
148 readlink = platform.readlink
148 readlink = platform.readlink
149 rename = platform.rename
149 rename = platform.rename
150 removedirs = platform.removedirs
150 removedirs = platform.removedirs
151 samedevice = platform.samedevice
151 samedevice = platform.samedevice
152 samefile = platform.samefile
152 samefile = platform.samefile
153 samestat = platform.samestat
153 samestat = platform.samestat
154 setflags = platform.setflags
154 setflags = platform.setflags
155 split = platform.split
155 split = platform.split
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 statisexec = platform.statisexec
157 statisexec = platform.statisexec
158 statislink = platform.statislink
158 statislink = platform.statislink
159 umask = platform.umask
159 umask = platform.umask
160 unlink = platform.unlink
160 unlink = platform.unlink
161 username = platform.username
161 username = platform.username
162
162
163
163
164 if typing.TYPE_CHECKING:
164 if typing.TYPE_CHECKING:
165 _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
165 _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
166
166
167
167
def setumask(val: int) -> None:
    """Update the process umask and the cached module-level value.

    Used by the chg server. This is a no-op on Windows, where the umask
    concept does not apply.
    """
    if not pycompat.iswindows:
        os.umask(val)
        global umask
        # keep the cached copies (module-level and platform module) in sync
        umask = val & 0o777
        platform.umask = umask
175
175
176
176
177 # small compat layer
177 # small compat layer
178 compengines = compression.compengines
178 compengines = compression.compengines
179 SERVERROLE = compression.SERVERROLE
179 SERVERROLE = compression.SERVERROLE
180 CLIENTROLE = compression.CLIENTROLE
180 CLIENTROLE = compression.CLIENTROLE
181
181
182 # Python compatibility
182 # Python compatibility
183
183
184 _notset = object()
184 _notset = object()
185
185
186
186
def bitsfrom(container):
    """Return the bitwise OR of every value in *container* (0 if empty)."""
    combined = 0
    for flag in container:
        combined |= flag
    return combined
192
192
193
193
194 # python 2.6 still have deprecation warning enabled by default. We do not want
194 # python 2.6 still have deprecation warning enabled by default. We do not want
195 # to display anything to standard user so detect if we are running test and
195 # to display anything to standard user so detect if we are running test and
196 # only use python deprecation warning in this case.
196 # only use python deprecation warning in this case.
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
# _dowarn: True only when HGEMITWARNINGS is set in the environment (test runs).
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
# NOTE(review): second identical guard kept separate from the block above;
# it re-narrows the filters enabled there with targeted 'ignore' entries.
if _dowarn:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )
223
223
224
224
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a native Python deprecation warning.

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(
        pycompat.sysstr(msg + suffix), DeprecationWarning, stacklevel + 1
    )
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
238
238
239
239
# Mapping of supported digest name (bytes) to its hashlib-style constructor.
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# sanity check: every entry in the strength ranking must be a known digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
250
250
251
251
class digester:
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        """Create one hasher per name in ``digests``; optionally seed with ``s``.

        Raises error.Abort for a name not present in DIGESTS.
        """
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every underlying hasher."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for ``key``; abort on an unknown digest type."""
        if key not in DIGESTS:
            # Fix: this error path previously interpolated the undefined
            # local name 'k' (a leftover module-level loop variable) instead
            # of the requested 'key', producing a wrong error message.
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
298
298
299
299
class digestchecker:
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, tracking bytes seen and digests."""
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        """Abort if the byte count or any digest disagrees with expectations."""
        if self._got != self._size:
            msg = _(b'size mismatch: expected %d, got %d')
            raise error.Abort(msg % (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                msg = _(b'%s mismatch: expected %s, got %s')
                raise error.Abort(msg % (name, expected, actual))
335
335
336
336
try:
    buffer = buffer  # pytype: disable=name-error
except NameError:
    # Python 3 has no builtin ``buffer``; emulate it with a read-only,
    # zero-copy memoryview slice.

    def buffer(sliceable, offset=0, length=None):
        """Return a read-only view of ``sliceable[offset:offset + length]``.

        When ``length`` is None the view extends to the end of ``sliceable``.
        """
        end = None if length is None else offset + length
        return memoryview(sliceable)[offset:end].toreadonly()
347
347
348
348
# default number of bytes requested per low-level read in bufferedinputpipe
_chunksize = 4096
350
350
351
351
class bufferedinputpipe:
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []  # pending byte chunks, oldest first
        self._eof = False  # set once os.read() returns b''
        self._lenbuf = 0  # total number of buffered bytes across chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped file object
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to ``size`` bytes, reading from the fd until satisfied
        or EOF."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        """Return up to ``size`` bytes using at most one low-level read.

        Unlike read(), this never blocks for more than a single os.read()
        when the buffer is empty."""
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        """Return one line (including the b'\\n'), or the remaining data at
        EOF."""
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the most recent chunk (-1 if none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            # collapse all chunks so the remainder is a single chunk again
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        # unbuffered os.read() on purpose: see the class docstring about
        # cooperating with polling
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
460
460
461
461
def has_mmap_populate():
    """Return True if mmapread() can pre-populate pages on this platform.

    Either the compiled osutil module offers background population, or the
    mmap module exposes MAP_POPULATE.
    """
    if hasattr(osutil, "background_mmap_populate"):
        return True
    return hasattr(mmap, 'MAP_POPULATE')
466
466
467
467
def mmapread(fp, size=None, pre_populate=True):
    """Read a file content using mmap

    The responsibility of checking that the file system is mmap safe lies
    with the caller (see `vfs.is_mmap_safe`).

    In some case, a normal string might be returned.

    If `pre_populate` is True (the default), the mmapped data will be
    pre-populated in memory if the system supports this option; this slows
    down the initial mmapping but avoids potentially crippling page faults on
    later access. If this is not the desired behavior, set `pre_populate` to
    False.
    """
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    if size is None:
        size = 0
    # accept either a file object or a raw file descriptor
    fd = getattr(fp, 'fileno', lambda: fp)()
    flags = mmap.MAP_PRIVATE
    bg_populate = hasattr(osutil, "background_mmap_populate")
    if pre_populate and not bg_populate:
        # fall back to the kernel's synchronous populate when available
        flags |= getattr(mmap, 'MAP_POPULATE', 0)
    try:
        m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
        if pre_populate and bg_populate:
            osutil.background_mmap_populate(m)
        return m
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
503
503
504
504
505 class fileobjectproxy:
505 class fileobjectproxy:
506 """A proxy around file objects that tells a watcher when events occur.
506 """A proxy around file objects that tells a watcher when events occur.
507
507
508 This type is intended to only be used for testing purposes. Think hard
508 This type is intended to only be used for testing purposes. Think hard
509 before using it in important code.
509 before using it in important code.
510 """
510 """
511
511
512 __slots__ = (
512 __slots__ = (
513 '_orig',
513 '_orig',
514 '_observer',
514 '_observer',
515 )
515 )
516
516
517 def __init__(self, fh, observer):
517 def __init__(self, fh, observer):
518 object.__setattr__(self, '_orig', fh)
518 object.__setattr__(self, '_orig', fh)
519 object.__setattr__(self, '_observer', observer)
519 object.__setattr__(self, '_observer', observer)
520
520
521 def __getattribute__(self, name):
521 def __getattribute__(self, name):
522 ours = {
522 ours = {
523 '_observer',
523 '_observer',
524 # IOBase
524 # IOBase
525 'close',
525 'close',
526 # closed if a property
526 # closed if a property
527 'fileno',
527 'fileno',
528 'flush',
528 'flush',
529 'isatty',
529 'isatty',
530 'readable',
530 'readable',
531 'readline',
531 'readline',
532 'readlines',
532 'readlines',
533 'seek',
533 'seek',
534 'seekable',
534 'seekable',
535 'tell',
535 'tell',
536 'truncate',
536 'truncate',
537 'writable',
537 'writable',
538 'writelines',
538 'writelines',
539 # RawIOBase
539 # RawIOBase
540 'read',
540 'read',
541 'readall',
541 'readall',
542 'readinto',
542 'readinto',
543 'write',
543 'write',
544 # BufferedIOBase
544 # BufferedIOBase
545 # raw is a property
545 # raw is a property
546 'detach',
546 'detach',
547 # read defined above
547 # read defined above
548 'read1',
548 'read1',
549 # readinto defined above
549 # readinto defined above
550 # write defined above
550 # write defined above
551 }
551 }
552
552
553 # We only observe some methods.
553 # We only observe some methods.
554 if name in ours:
554 if name in ours:
555 return object.__getattribute__(self, name)
555 return object.__getattribute__(self, name)
556
556
557 return getattr(object.__getattribute__(self, '_orig'), name)
557 return getattr(object.__getattribute__(self, '_orig'), name)
558
558
559 def __nonzero__(self):
559 def __nonzero__(self):
560 return bool(object.__getattribute__(self, '_orig'))
560 return bool(object.__getattribute__(self, '_orig'))
561
561
562 __bool__ = __nonzero__
562 __bool__ = __nonzero__
563
563
564 def __delattr__(self, name):
564 def __delattr__(self, name):
565 return delattr(object.__getattribute__(self, '_orig'), name)
565 return delattr(object.__getattribute__(self, '_orig'), name)
566
566
567 def __setattr__(self, name, value):
567 def __setattr__(self, name, value):
568 return setattr(object.__getattribute__(self, '_orig'), name, value)
568 return setattr(object.__getattribute__(self, '_orig'), name, value)
569
569
570 def __iter__(self):
570 def __iter__(self):
571 return object.__getattribute__(self, '_orig').__iter__()
571 return object.__getattribute__(self, '_orig').__iter__()
572
572
def _observedcall(self, name, *args, **kwargs):
    """Invoke ``name`` on the wrapped object, then notify the observer.

    The observer hook of the same name (if any) receives the result
    first, followed by the original call arguments, so it can log or
    otherwise react.
    """
    orig = object.__getattribute__(self, '_orig')
    result = getattr(orig, name)(*args, **kwargs)

    observer = object.__getattribute__(self, '_observer')
    hook = getattr(observer, name, None)
    if hook:
        hook(result, *args, **kwargs)

    return result
586
586
# Every observed I/O method funnels through ``_observedcall`` so the
# observer is notified after the real call completes.  ``_observedcall``
# is fetched with ``object.__getattribute__`` to sidestep the proxy's
# own ``__getattribute__`` override.

def close(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('close', *args, **kwargs)

def fileno(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('fileno', *args, **kwargs)

def flush(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('flush', *args, **kwargs)

def isatty(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('isatty', *args, **kwargs)

def readable(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('readable', *args, **kwargs)

def readline(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('readline', *args, **kwargs)

def readlines(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('readlines', *args, **kwargs)

def seek(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('seek', *args, **kwargs)

def seekable(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('seekable', *args, **kwargs)

def tell(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('tell', *args, **kwargs)

def truncate(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('truncate', *args, **kwargs)

def writable(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('writable', *args, **kwargs)

def writelines(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('writelines', *args, **kwargs)

def read(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('read', *args, **kwargs)

def readall(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('readall', *args, **kwargs)

def readinto(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('readinto', *args, **kwargs)

def write(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('write', *args, **kwargs)

def detach(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('detach', *args, **kwargs)

def read1(self, *args, **kwargs):
    call = object.__getattribute__(self, '_observedcall')
    return call('read1', *args, **kwargs)
681
681
682
682
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _fillbuffer(self, size=_chunksize):
        # Refill via the parent, then report the raw os.read() that just
        # happened to the observer (if it cares).
        res = super()._fillbuffer(size=size)

        hook = getattr(self._input._observer, 'osread', None)
        if hook:
            hook(res, size)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super().read(size)

        hook = getattr(self._input._observer, 'bufferedread', None)
        if hook:
            hook(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super().readline(*args, **kwargs)

        hook = getattr(self._input._observer, 'bufferedreadline', None)
        if hook:
            hook(res)

        return res
723
723
724
724
# Socket methods that socketproxy intercepts and reports to its observer;
# any other attribute access is delegated straight to the wrapped socket.
PROXIED_SOCKET_METHODS = {
    'gettimeout',
    'makefile',
    'recv',
    'recv_into',
    'recvfrom',
    'recvfrom_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'setsockopt',
    'settimeout',
}
739
739
740
740
class socketproxy:
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        # Bypass our own __setattr__ (which forwards to the wrapped
        # socket) while wiring up the proxy's internal state.
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # Only the observed methods resolve on the proxy itself; every
        # other attribute is delegated to the wrapped socket.
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        """Call ``name`` on the wrapped socket, then notify the observer.

        The observer hook of the same name (if any) receives the result
        first, followed by the original arguments.
        """
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        # Fix: this previously dispatched to the misspelled name
        # 'recv_info', which made _observedcall() raise AttributeError on
        # the underlying socket whenever recv_into() was used.
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )
862
862
863
863
class baseproxyobserver:
    """Shared plumbing for observers that log proxied I/O to ``fh``."""

    def __init__(self, fh, name, logdata, logdataapis):
        # fh: file handle (bytes mode) that log lines are written to.
        self.fh = fh
        # name: label prefixed to every emitted log line.
        self.name = name
        # logdata: also log payload bytes, not just API calls.
        self.logdata = logdata
        # logdataapis: log one line per API call.
        self.logdataapis = logdataapis

    def _writedata(self, data):
        """Emit ``data`` to the log according to the configured flags."""
        fh = self.fh

        if not self.logdata:
            # Payload logging is off: just terminate the API line, if any.
            if self.logdataapis:
                fh.write(b'\n')
                fh.flush()
            return

        if b'\n' not in data:
            # Single-line payloads fit on (or after) the API line.
            if self.logdataapis:
                fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            fh.flush()
            return

        # Multi-line payloads get one log line per data line.
        if self.logdataapis:
            fh.write(b':\n')

        for line in data.splitlines(True):
            fh.write(b'%s> %s\n' % (self.name, stringutil.escapestr(line)))
        fh.flush()
899
899
900
900
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super().__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return

        # Python 3 can return None from reads at EOF instead of empty
        # strings.
        if res is None:
            res = b''

        if res == b'' and size == -1:
            # Suppress pointless read(-1) calls that return nothing.
            # These happen _a lot_ on Python 3, and there doesn't seem
            # to be a better workaround to have matching Python 2 and 3
            # behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        payload = b'' if res is None else dest[0:res]

        # _writedata() uses the "in" operator and is confused by
        # memoryview because characters are ints on Python 3.
        if isinstance(payload, memoryview):
            payload = payload.tobytes()

        self._writedata(payload)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returned None from some write() calls; normalize to
        # the byte count so the log line is meaningful.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)
999
999
1000
1000
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object.

    ``fh`` is wrapped in a ``fileobjectproxy`` whose observer logs to
    ``logh``; the flags select which operations and payloads are logged.
    """
    return fileobjectproxy(
        fh,
        fileobjectobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
1015
1015
1016
1016
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        # states: log connection-state changes (timeouts, blocking, ...).
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        # socket.send() returns the number of bytes sent (an int), so log
        # ``res`` directly; the previous code called len() on it, which
        # raised TypeError whenever this path was exercised.
        # NOTE(review): unlike sendall(), this write is not guarded by
        # self.logdataapis -- looks like an oversight; confirm before
        # changing it.
        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, res)
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two signatures; only the 4-argument form carries
        # explicit flags.
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        # NOTE(review): %f raises TypeError when the socket has no
        # timeout (gettimeout() returns None) -- confirm callers never
        # hit that case before relying on this.
        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
1150
1150
1151
1151
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket.

    ``fh`` is wrapped in a ``socketproxy`` whose observer logs to
    ``logh``; the flags select which operations and payloads are logged.
    """
    return socketproxy(
        fh,
        socketobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            states=states,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
1174
1174
1175
1175
def version():
    """Return version information if available."""
    try:
        from . import __version__  # pytype: disable=import-error
    except ImportError:
        # Not running from an installed/built package.
        return b'unknown'
    return __version__.version
1184
1184
1185
1185
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    # Up to three dotted numeric components, then everything else as "extra".
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)
1271
1271
1272
1272
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        # Nullary function: a one-element list acts as the "computed yet?"
        # flag plus the cached value.
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
1302
1302
1303
1303
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        # _copied counts outstanding shared references created by copy();
        # a write while shares exist must materialize a real copy.
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1322
1322
1323
1323
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> list(d2.items())
    [('a', 0), ('b', 1)]
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> list(d1.items())
    [('a', 0), ('a.5', 0.5), ('b', 1)]
    """

    def __setitem__(self, key, value):
        # Re-setting a key moves it to the end (last-set order), unlike a
        # plain OrderedDict which keeps the original position.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        # Rebuild the tail of the dict so that ``key`` lands at ``position``.
        for i, (k, v) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v
1361
1361
1362
1362
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1388
1388
1389
1389
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1395
1395
1396
1396
class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    # NOTE: ``__metaclass__`` is the Python 2 spelling and is ignored by
    # Python 3, so the abstract methods are not actually enforced at
    # instantiation time; kept for documentation/pytype purposes.
    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Only commit on a clean exit; release() always runs and aborts
        # the transaction if close() was never reached.
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()
1422
1422
1423
1423
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired means the user must finish the operation
        # later, so commit what was done instead of aborting, then re-raise.
        tr.close()
        raise
    finally:
        tr.release()
1441
1441
1442
1442
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """A no-op context manager yielding ``enter_result``."""
    yield enter_result
1446
1446
1447
1447
class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # A fresh node forms a single-element circular list.
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1470
1470
1471
1471
class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new code. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        while n.key is _notset:
            n = n.prev

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node: _lrucachenode):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self) -> _lrucachenode:
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1764
1764
1765
1765
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    # deque tracks recency; the leftmost entry is the oldest and is the
    # one evicted when the cache exceeds 20 entries.
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
1795
1795
1796
1796
class propertycache:
    """A descriptor that computes a value once and caches it on the instance.

    After the first access the cached value shadows the descriptor in the
    instance ``__dict__``, so subsequent lookups bypass ``__get__`` entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1810
1810
1811
1811
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # propertycache stores under the sysstr name; missing keys are a no-op
    obj.__dict__.pop(pycompat.sysstr(prop), None)
1817
1817
1818
1818
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""
    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size >= min:
            if min < max:
                # double the threshold, then jump straight to the largest
                # power of two not exceeding what we actually buffered,
                # whichever is bigger; never exceed max
                min <<= 1
                floorpow2 = 1 << (size.bit_length() - 1) if size else 1
                if floorpow2 > min:
                    min = floorpow2
                if min > max:
                    min = max
            yield b''.join(pending)
            pending = []
            size = 0
    if pending:
        yield b''.join(pending)
1850
1850
1851
1851
def always(fn):
    """Matcher predicate accepting every entry."""
    return True
1854
1854
1855
1855
def never(fn):
    """Matcher predicate rejecting every entry."""
    return False
1858
1858
1859
1859
def nogc(func=None) -> Any:
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    # dual use: `with nogc():` (func is None) or `@nogc` (func given)
    if func is None:
        return _nogc_context()
    return _nogc_decorator(func)
1877
1877
1878
1878
1879 @contextlib.contextmanager
1879 @contextlib.contextmanager
1880 def _nogc_context():
1880 def _nogc_context():
1881 gcenabled = gc.isenabled()
1881 gcenabled = gc.isenabled()
1882 gc.disable()
1882 gc.disable()
1883 try:
1883 try:
1884 yield
1884 yield
1885 finally:
1885 finally:
1886 if gcenabled:
1886 if gcenabled:
1887 gc.enable()
1887 gc.enable()
1888
1888
1889
1889
def _nogc_decorator(func):
    """Decorator running ``func`` with the cyclic GC disabled."""

    def wrapper(*args, **kwargs):
        with _nogc_context():
            return func(*args, **kwargs)

    return wrapper
1896
1896
1897
1897
if pycompat.ispypy:
    # PyPy runs slower with gc disabled, so make nogc a pass-through there
    nogc = lambda x: x
1901
1901
1902
1902
def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute destination path
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    srcparts = splitpath(n1)
    dstparts = n2.split(b'/')
    # count the shared leading components of source and destination
    common = 0
    for s, d in zip(srcparts, dstparts):
        if s != d:
            break
        common += 1
    # climb out of what remains of the source, then descend into the rest
    # of the destination
    up = [b'..'] * (len(srcparts) - common)
    return pycompat.ossep.join(up + dstparts[common:]) or b'.'
1928
1928
1929
1929
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a TypeError whose traceback is exactly `depth` frames deep was
            # raised at the call site itself, i.e. the arguments didn't match
            # func's signature; deeper tracebacks come from inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check
1942
1942
1943
1943
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1960
1960
1961
1961
def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the destination's stat so an ambiguous mtime can be
            # detected (and nudged forward) after the copy
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        # recreate the link rather than copying its target's contents
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1
                    ) & 0x7FFFFFFF
                    os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
2040
2040
2041
2041
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking remained viable,
    and how many files were copied/linked.
    """
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # only attempt hardlinks when source and destination share a device
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # hardlink viability discovered below propagates to siblings
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exist ?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
2085
2085
2086
2086
# file names reserved by Windows (case-insensitively, with any extension)
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# characters Windows forbids in file names
_winreservedchars = b':*?"<>|'
2112
2112
2113
2113
def checkwinfilename(path: bytes) -> Optional[bytes]:
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # examine each path component separately
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                # control characters are invalid in Windows file names
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # the reserved device names apply to the part before the first dot
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        # note: `n in b'..'` is a bytes substring test, matching b'', b'.'
        # and b'..' -- it exempts the bare '.'/'..' path components
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2173
2173
2174
2174
# highest-resolution timer available; perf_counter exists on Python >= 3.3
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock  # pytype: disable=module-attr
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
2186
2186
2187
2187
def makelock(info, pathname):
    """Create a lock file atomically if possible

    The lock info (usually the holder's id) is stored as the target of a
    symlink named pathname; where symlinks are unavailable, it is written
    into an exclusively-created regular file instead.

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # EEXIST means somebody else holds the lock: propagate. Any other
        # error (e.g. symlinks unsupported on this fs) falls through to the
        # regular-file scheme below.
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    # O_EXCL gives atomic creation; O_BINARY matters only on Windows
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    try:
        os.write(ld, info)
    finally:
        # close even if the write raises, so the descriptor isn't leaked
        os.close(ld)
2206
2208
2207
2209
def readlock(pathname: bytes) -> bytes:
    """Return the info recorded in the lock file created by makelock()."""
    try:
        return readlink(pathname)
    except OSError as why:
        # EINVAL: pathname is not a symlink; ENOSYS: symlinks unsupported.
        # Either way, fall through and read it as a regular file.
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # no symlink in os
        pass
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
2218
2220
2219
2221
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # file-like object without a descriptor: stat it by name
        return os.stat(fp.name)
    return os.fstat(fd)
2226
2228
2227
2229
2228 # File system features
2230 # File system features
2229
2231
2230
2232
def fscasesensitive(path: bytes) -> bool:
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpart, base = os.path.split(path)
    # flip the case of the final component and see whether it resolves to
    # the same file
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True  # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirpart, folded))
    except OSError:
        # the case-flipped name doesn't exist at all
        return True
    return st2 != st1
2253
2255
2254
2256
# converts a pattern to whatever input type the detected re2 binding wants
_re2_input = lambda x: x
# google-re2 needs to be told not to log errors on its own
_re2_options = None
try:
    import re2  # pytype: disable=import-error

    # None means "importable but not yet probed"; _re._checkre2() resolves it
    _re2 = None
except ImportError:
    _re2 = False
2264
2266
2265
2267
def has_re2():
    """return True if re2 is available, False otherwise"""
    if _re2 is None:
        # lazily probe the re2 binding on first use
        _re._checkre2()
    return _re2
2271
2273
2272
2274
class _re:
    # Facade over the `re2` C binding (when usable) and the stdlib `re`
    # module, exposing compile() and escape().

    @staticmethod
    def _checkre2():
        # probe the installed re2 binding and record the result in the
        # module-level _re2 / _re2_input / _re2_options globals
        global _re2
        global _re2_input
        global _re2_options
        if _re2 is not None:
            # we already have the answer
            return

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr
        try:
            # silence re2's own error logging when the binding supports it
            quiet = re2.Options()
            quiet.log_errors = False
            _re2_options = quiet
        except AttributeError:
            pass

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                input_regex = _re2_input(pat)
                if _re2_options is not None:
                    compiled = re2.compile(input_regex, options=_re2_options)
                else:
                    compiled = re2.compile(input_regex)
                return compiled
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2342
2344
2343
2345
# module-level singleton used as `util.re.compile()` / `util.re.escape`
re = _re()

# fspath() cache: directory -> {normcased name: on-disk name}
_fspathcache = {}
2347
2349
2348
2350
def fspath(name: bytes, root: bytes) -> bytes:
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): bytes.replace returns a new object and the result is
    # discarded here, so this line has no effect -- presumably it was meant
    # to escape backslashes for the character class below; confirm.
    seps.replace(b'\\', b'\\\\')
    # alternates path components ([^seps]+) with separator runs ([seps]+)
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
2391
2393
2392
2394
def checknlink(testfile: bytes) -> bool:
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = f2 = fp = None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # couldn't create the temp files or the hardlink: assume the
        # filesystem doesn't report link counts usefully
        return False
    finally:
        if fp is not None:
            fp.close()
        for leftover in (f1, f2):
            if leftover is None:
                continue
            try:
                os.unlink(leftover)
            except OSError:
                pass
2424
2426
2425
2427
def endswithsep(path: bytes) -> bool:
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    # altsep is empty/None on platforms without an alternate separator
    return bool(altsep and path.endswith(altsep))
2433
2435
2434
2436
def splitpath(path: bytes) -> List[bytes]:
    """Split *path* on os.sep only.

    os.altsep is deliberately ignored: this is merely a spelled-out
    ``path.split(os.sep)``.  Run os.path.normpath() on the input first
    if normalization is needed.
    """
    return path.split(pycompat.ossep)
2442
2444
2443
2445
def mktempcopy(
    name: bytes,
    emptyok: bool = False,
    createmode: Optional[int] = None,
    enforcewritable: bool = False,
) -> bytes:
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            # a missing source simply means the (empty) temp file is the copy
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        # Use context managers so both descriptors are closed even when
        # opening the destination or copying raises; previously ifp (and
        # possibly ofp) leaked on the error path.
        with ifp:
            with posixfile(temp, b"wb") as ofp:
                for chunk in filechunkiter(ifp):
                    ofp.write(chunk)
    except:  # re-raises
        # best-effort cleanup of the temp file before propagating
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2490
2492
2491
2493
class filestat:
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """

    def __init__(self, stat: Optional[os.stat_result]) -> None:
        # note: the parameter deliberately shadows the 'stat' module here
        self.stat = stat

    @classmethod
    def frompath(cls: Type[_Tfilestat], path: bytes) -> _Tfilestat:
        """Build a filestat for *path*; 'stat' is None if path is missing."""
        try:
            stat = os.stat(path)
        except FileNotFoundError:
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls: Type[_Tfilestat], fp: BinaryIO) -> _Tfilestat:
        """Build a filestat from an already-open file object via fstat()."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old) -> bool:
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            # one side has stat=None (missing file); fall through
            pass
        try:
            # both must represent a missing file to compare equal
            return self.stat is None and old.stat is None
        except AttributeError:
            # 'old' isn't a filestat-like object at all
            return False

    def isambig(self, old: _Tfilestat) -> bool:
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            # either side represents a missing file: not ambiguous
            return False

    def avoidambig(self, path: bytes, old: _Tfilestat) -> bool:
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime one second past the old stat, wrapping at 2**31-1
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except PermissionError:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other) -> bool:
        return not self == other
2592
2594
2593
2595
class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # all writes go to a temp copy; 'w' mode means the original
        # content need not be copied into it
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.writelines = self._fp.writelines
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Atomically rename the temp file over the permanent name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only gather the pre-rename stat when ambiguity checking is on
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away all writes: remove the temp file without renaming."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if hasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if the body raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
2663
2665
2664
2666
def tryrmdir(f):
    """Best-effort removedirs(): ignore missing or non-empty directories."""
    try:
        removedirs(f)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
            raise
2671
2673
2672
2674
def unlinkpath(
    f: bytes, ignoremissing: bool = False, rmdir: bool = True
) -> None:
    """unlink and remove the directory if it is empty"""
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    if not rmdir:
        return
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2687
2689
2688
2690
def tryunlink(f: bytes) -> bool:
    """Attempt to remove a file, ignoring FileNotFoundError.

    Returns False if the file did not exist, True otherwise.
    """
    try:
        unlink(f)
    except FileNotFoundError:
        return False
    return True
2699
2701
2700
2702
def makedirs(
    name: bytes, mode: Optional[int] = None, notindexed: bool = False
) -> None:
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create the ancestors, then retry once
        parent = os.path.dirname(abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err2:
            if err2.errno != errno.EEXIST:
                raise
            # lost a creation race with another process; the directory
            # exists now, which is all we wanted
            return
    if mode is not None:
        os.chmod(name, mode)
2730
2732
2731
2733
def readfile(path: bytes) -> bytes:
    """Return the entire contents of the file at *path*."""
    with open(path, b'rb') as f:
        return f.read()
2735
2737
2736
2738
def writefile(path: bytes, text: bytes) -> None:
    """Replace the contents of the file at *path* with *text*."""
    with open(path, b'wb') as f:
        f.write(text)
2740
2742
2741
2743
def appendfile(path: bytes, text: bytes) -> None:
    """Append *text* to the end of the file at *path*."""
    with open(path, b'ab') as f:
        f.write(text)
2745
2747
2746
2748
class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            # Cap buffered chunks at 256KB so a huge input chunk is not
            # shuffled around whole.
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2**18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the head chunk of self._queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # A previous sized read() may have left data in self._queue
            # (including a partially consumed head chunk).  Drain that
            # first so "read everything" really returns everything,
            # instead of silently dropping the buffered bytes.
            pending = []
            if self._queue:
                head = self._queue.popleft()
                pending.append(head[self._chunkoffset:])
                self._chunkoffset = 0
                pending.extend(self._queue)
                self._queue.clear()
            pending.append(b''.join(self.iter))
            return b''.join(pending)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)
2828
2830
2829
2831
def filechunkiter(f, size=131072, limit=None):
    """Yield the data in file object *f* in chunks of up to *size* bytes
    (default 131072), stopping after *limit* bytes when a limit is given
    (default: read all data).  A chunk may be shorter than *size* at end
    of file, or when *f* is a socket or similar object that can return
    less data than requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        if remaining is None:
            request = size
        else:
            request = min(remaining, size)
        # a zero-byte request (size 0 or exhausted limit) terminates
        chunk = f.read(request) if request else b''
        if not chunk:
            return
        if remaining:
            remaining -= len(chunk)
        yield chunk
2850
2852
2851
2853
class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Wraps a source file object and serves at most N bytes from it;
    any attempt to read beyond that budget behaves like EOF.

    The wrapped object must not be read through any other path while
    this proxy is in use, or the internal byte accounting will drift
    and results become undefined.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        if not remaining:
            return b''

        # negative n means "read everything still allowed"
        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
2889
2891
2890
2892
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def render(count):
        magnitude = abs(count)
        # walk the table coarsest-unit first
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing was big enough: fall back to the finest (last) format
        return unittable[-1][2] % count

    return render
2901
2903
2902
2904
def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    # keep this check first so (0, -1) reports a bad range, not a bad
    # fromline, matching historical error messages
    if toline < fromline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline <= 0:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline
2923
2925
2924
2926
# Render a byte quantity with a human-readable unit (GB/MB/KB/bytes),
# picking the coarsest unit that keeps roughly three significant figures.
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
2937
2939
2938
2940
class transformingwriter(typelib.BinaryIO_Proxy):
    """Writable file-like wrapper that runs every payload through a
    transformation function before passing it to the wrapped file."""

    def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
        # The wrapped writable file and the bytes -> bytes transform.
        self._fp = fp
        self._encode = encode

    def close(self) -> None:
        self._fp.close()

    def flush(self) -> None:
        self._fp.flush()

    def write(self, data: bytes) -> int:
        # Report whatever the underlying file reports for the transformed
        # payload (its length may differ from len(data)).
        transformed = self._encode(data)
        return self._fp.write(transformed)
2954
2956
2955
2957
# One end-of-line marker: a single LF optionally preceded by any run of CRs
# (so CRLF — and sloppy CRCRLF — all collapse to one EOL).  Old Macintosh
# files are not supported: a CR with no following LF is left untouched.
_eolre = remod.compile(br'\r*\n')


def tolf(s: bytes) -> bytes:
    """Return ``s`` with every line ending normalized to a single LF."""
    return _eolre.sub(b'\n', s)


def tocrlf(s: bytes) -> bytes:
    """Return ``s`` with every line ending normalized to CRLF."""
    return _eolre.sub(b'\r\n', s)
2968
2970
2969
2971
def _crlfwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
    """Wrap writable file ``fp`` so written data gets CRLF line endings."""
    return transformingwriter(fp, tocrlf)
2972
2974
2973
2975
# Bind the EOL-conversion helpers for the host platform: where the native
# line separator is CRLF (i.e. Windows) real conversions are needed;
# everywhere else the native convention is already LF and the helpers are
# identity functions.
if pycompat.oslinesep == b'\r\n':
    tonativeeol, fromnativeeol, nativeeolwriter = tocrlf, tolf, _crlfwriter
else:
    tonativeeol = fromnativeeol = nativeeolwriter = pycompat.identity
2982
2984
if typing.TYPE_CHECKING:
    # Replace the various overloads that come along with aliasing other methods
    # with the narrow definition that we care about in the type checking phase
    # only. This ensures that both Windows and POSIX see only the definition
    # that is actually available.
    #
    # These bodies never execute: typing.TYPE_CHECKING is False at runtime,
    # so the runtime bindings made in the if/else above remain in effect.

    def tonativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def fromnativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def nativeeolwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
        raise NotImplementedError
2997
2999
2998
3000
# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
    """Return ``fp`` unchanged (historical Python 2 iteration shim)."""
    return fp
3002
3004
3003
3005
def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
    """Yield each line of every bytes chunk in ``iterator``.

    Line terminators are stripped, per ``bytes.splitlines``.
    """
    for chunk in iterator:
        yield from chunk.splitlines()
3008
3010
3009
3011
def expandpath(path: bytes) -> bytes:
    """Expand environment variables, then a leading ``~``, in ``path``."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
3012
3014
3013
3015
3014 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
3016 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
3015 """Return the result of interpolating items in the mapping into string s.
3017 """Return the result of interpolating items in the mapping into string s.
3016
3018
3017 prefix is a single character string, or a two character string with
3019 prefix is a single character string, or a two character string with
3018 a backslash as the first character if the prefix needs to be escaped in
3020 a backslash as the first character if the prefix needs to be escaped in
3019 a regular expression.
3021 a regular expression.
3020
3022
3021 fn is an optional function that will be applied to the replacement text
3023 fn is an optional function that will be applied to the replacement text
3022 just before replacement.
3024 just before replacement.
3023
3025
3024 escape_prefix is an optional flag that allows using doubled prefix for
3026 escape_prefix is an optional flag that allows using doubled prefix for
3025 its escaping.
3027 its escaping.
3026 """
3028 """
3027 fn = fn or (lambda s: s)
3029 fn = fn or (lambda s: s)
3028 patterns = b'|'.join(mapping.keys())
3030 patterns = b'|'.join(mapping.keys())
3029 if escape_prefix:
3031 if escape_prefix:
3030 patterns += b'|' + prefix
3032 patterns += b'|' + prefix
3031 if len(prefix) > 1:
3033 if len(prefix) > 1:
3032 prefix_char = prefix[1:]
3034 prefix_char = prefix[1:]
3033 else:
3035 else:
3034 prefix_char = prefix
3036 prefix_char = prefix
3035 mapping[prefix_char] = prefix_char
3037 mapping[prefix_char] = prefix_char
3036 r = remod.compile(br'%s(%s)' % (prefix, patterns))
3038 r = remod.compile(br'%s(%s)' % (prefix, patterns))
3037 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3039 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3038
3040
3039
3041
# Human-readable duration formatter built on unitcountfn().  Rows run from
# whole seconds down to nanoseconds; as with the other unitcountfn tables in
# this file, order is significant (see unitcountfn for the matching logic).
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
3055
3057
3056
3058
@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # Render as a human-readable duration; b'<unknown>' until the context
        # has exited and ``elapsed`` has been filled in by timedcm().
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
3074
3076
3075
3077
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        # Record the elapsed time even if the body raised, then pop one
        # level off the nesting counter.
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


# Depth of currently-active timedcm contexts; drives the ``level`` field
# (and hence indentation of nested @timed output).
timedcm._nested = 0
3097
3099
3098
3100
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        # Indent by nesting level so stacked @timed calls read as a tree.
        indent = b' ' * time_stats.level * 2
        name = pycompat.bytestr(func.__name__)
        procutil.stderr.write(b'%s%s: %s\n' % (indent, name, time_stats))
        return result

    return wrapper
3125
3127
3126
3128
# Suffix multipliers understood by sizetoint().  Suffixes are tried in table
# order, so the bare b'b' entry must remain last or it would shadow the
# b'kb'/b'mb'/b'gb' entries.
_sizeunits = (
    (b'm', 2**20),
    (b'k', 2**10),
    (b'g', 2**30),
    (b'kb', 2**10),
    (b'mb', 2**20),
    (b'gb', 2**30),
    (b'b', 1),
)


def sizetoint(s: bytes) -> int:
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * multiplier)
        # No recognized suffix: a plain integer byte count.
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
3156
3158
3157
3159
class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # List of (source, hook) pairs; kept sorted lazily on invocation.
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort in place by source name so call order is deterministic.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
3175
3177
3176
3178
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    """
    # Drop this frame plus the 'skip' innermost callers, keep 'depth' frames.
    frames = traceback.extract_stack()[: -skip - 1][-depth:]
    entries = [
        (fileline % (pycompat.sysbytes(fname), lineno), pycompat.sysbytes(fn))
        for fname, lineno, fn, _text in frames
    ]
    if not entries:
        return
    widest = max(len(loc) for loc, _fn in entries)
    for loc, fn in entries:
        if line is None:
            yield (widest, loc, fn)
        else:
            yield line % (widest, loc, fn)
3200
3202
3201
3203
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    # Flush the other stream first so interleaved output stays ordered.
    if otherf:
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # Skip one extra frame so this helper itself is not reported.
    for frameline in getstackframes(skip + 1, depth=depth):
        f.write(prefix + frameline)
    f.flush()


# convenient shortcut
dst = debugstacktrace
3226
3228
3227
3229
3228 def safename(f, tag, ctx, others=None):
3230 def safename(f, tag, ctx, others=None):
3229 """
3231 """
3230 Generate a name that it is safe to rename f to in the given context.
3232 Generate a name that it is safe to rename f to in the given context.
3231
3233
3232 f: filename to rename
3234 f: filename to rename
3233 tag: a string tag that will be included in the new name
3235 tag: a string tag that will be included in the new name
3234 ctx: a context, in which the new name must not exist
3236 ctx: a context, in which the new name must not exist
3235 others: a set of other filenames that the new name must not be in
3237 others: a set of other filenames that the new name must not be in
3236
3238
3237 Returns a file name of the form oldname~tag[~number] which does not exist
3239 Returns a file name of the form oldname~tag[~number] which does not exist
3238 in the provided context and is not in the set of other names.
3240 in the provided context and is not in the set of other names.
3239 """
3241 """
3240 if others is None:
3242 if others is None:
3241 others = set()
3243 others = set()
3242
3244
3243 fn = b'%s~%s' % (f, tag)
3245 fn = b'%s~%s' % (f, tag)
3244 if fn not in ctx and fn not in others:
3246 if fn not in ctx and fn not in others:
3245 return fn
3247 return fn
3246 for n in itertools.count(1):
3248 for n in itertools.count(1):
3247 fn = b'%s~%s~%s' % (f, tag, n)
3249 fn = b'%s~%s~%s' % (f, tag, n)
3248 if fn not in ctx and fn not in others:
3250 if fn not in ctx and fn not in others:
3249 return fn
3251 return fn
3250
3252
3251
3253
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) >= n:
        return data
    # Short read: the stream ended before n bytes arrived.
    raise error.Abort(
        _(b"stream ended unexpectedly (got %d bytes, expected %d)")
        % (len(data), n)
    )
3261
3263
3262
3264
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    b'\\x00'
    >>> uvarintencode(1)
    b'\\x01'
    >>> uvarintencode(127)
    b'\\x7f'
    >>> uvarintencode(1337)
    b'\\xb9\\n'
    >>> uvarintencode(65536)
    b'\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
    ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    out = bytearray()
    remaining = value
    while True:
        # Emit 7 bits at a time, least significant group first.
        group = remaining & 0x7F
        remaining >>= 7
        if remaining:
            # More groups follow: set the continuation bit.
            out.append(0x80 | group)
        else:
            out.append(group)
            return bytes(out)
3298
3300
3299
3301
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
    ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    # Each byte contributes 7 payload bits, least significant group first;
    # a clear high bit marks the final byte.
    for shift in itertools.count(0, 7):
        byte = ord(readexactly(fh, 1))
        result |= (byte & 0x7F) << shift
        if byte < 0x80:
            return result
3329
3331
3330
3332
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    prior = locale.setlocale(locale.LC_CTYPE, None)
    if prior != 'C':
        # Already initialized to something; leave it alone.
        yield
        return
    try:
        # The likely failure case is that the locale from the environment
        # variables is unknown; keep the C locale then.
        with contextlib.suppress(locale.Error):
            locale.setlocale(locale.LC_CTYPE, '')
        yield
    finally:
        locale.setlocale(locale.LC_CTYPE, prior)
3358
3360
3359
3361
def _estimatememory() -> Optional[int]:
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # noinspection PyPep8Naming
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG

        # NOTE(review): Structure/byref/sizeof/windll are defined in ctypes
        # itself; importing them from ctypes.wintypes works because wintypes
        # re-exports everything via ``from ctypes import *``.
        from ctypes.wintypes import ( # pytype: disable=import-error
            Structure,
            byref,
            sizeof,
            windll,
        )

        # Mirrors the MEMORYSTATUSEX struct consumed by GlobalMemoryStatusEx.
        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        # The API requires dwLength to be pre-set to the struct size.
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError: # sysconf can fail
        pass
    except KeyError: # unknown parameter
        pass
General Comments 0
You need to be logged in to leave comments. Login now