##// END OF EJS Templates
util: make `mmapread()` work on Windows again...
Matt Harbison -
r52823:bc9ed92d default
parent child Browse files
Show More
@@ -1,3406 +1,3415
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import annotations
16 from __future__ import annotations
17
17
18 import abc
18 import abc
19 import collections
19 import collections
20 import contextlib
20 import contextlib
21 import errno
21 import errno
22 import gc
22 import gc
23 import hashlib
23 import hashlib
24 import io
24 import io
25 import itertools
25 import itertools
26 import locale
26 import locale
27 import mmap
27 import mmap
28 import os
28 import os
29 import pickle # provides util.pickle symbol
29 import pickle # provides util.pickle symbol
30 import re as remod
30 import re as remod
31 import shutil
31 import shutil
32 import stat
32 import stat
33 import sys
33 import sys
34 import time
34 import time
35 import traceback
35 import traceback
36 import typing
36 import typing
37 import warnings
37 import warnings
38
38
39 from typing import (
39 from typing import (
40 Any,
40 Any,
41 BinaryIO,
41 BinaryIO,
42 Callable,
42 Callable,
43 Iterable,
43 Iterable,
44 Iterator,
44 Iterator,
45 List,
45 List,
46 Optional,
46 Optional,
47 Tuple,
47 Tuple,
48 Type,
48 Type,
49 TypeVar,
49 TypeVar,
50 )
50 )
51
51
52 from .node import hex
52 from .node import hex
53 from .thirdparty import attr
53 from .thirdparty import attr
54
54
55 # Force pytype to use the non-vendored package
55 # Force pytype to use the non-vendored package
56 if typing.TYPE_CHECKING:
56 if typing.TYPE_CHECKING:
57 # noinspection PyPackageRequirements
57 # noinspection PyPackageRequirements
58 import attr
58 import attr
59
59
60 from .pycompat import (
60 from .pycompat import (
61 open,
61 open,
62 )
62 )
63 from hgdemandimport import tracing
63 from hgdemandimport import tracing
64 from . import (
64 from . import (
65 encoding,
65 encoding,
66 error,
66 error,
67 i18n,
67 i18n,
68 policy,
68 policy,
69 pycompat,
69 pycompat,
70 typelib,
70 typelib,
71 urllibcompat,
71 urllibcompat,
72 )
72 )
73 from .utils import (
73 from .utils import (
74 compression,
74 compression,
75 hashutil,
75 hashutil,
76 procutil,
76 procutil,
77 stringutil,
77 stringutil,
78 )
78 )
79
79
80 # keeps pyflakes happy
80 # keeps pyflakes happy
81 assert [
81 assert [
82 Iterable,
82 Iterable,
83 Iterator,
83 Iterator,
84 List,
84 List,
85 Optional,
85 Optional,
86 Tuple,
86 Tuple,
87 ]
87 ]
88
88
89
89
90 base85 = policy.importmod('base85')
90 base85 = policy.importmod('base85')
91 osutil = policy.importmod('osutil')
91 osutil = policy.importmod('osutil')
92
92
93 b85decode = base85.b85decode
93 b85decode = base85.b85decode
94 b85encode = base85.b85encode
94 b85encode = base85.b85encode
95
95
96 cookielib = pycompat.cookielib
96 cookielib = pycompat.cookielib
97 httplib = pycompat.httplib
97 httplib = pycompat.httplib
98 safehasattr = pycompat.safehasattr
98 safehasattr = pycompat.safehasattr
99 socketserver = pycompat.socketserver
99 socketserver = pycompat.socketserver
100 bytesio = io.BytesIO
100 bytesio = io.BytesIO
101 # TODO deprecate stringio name, as it is a lie on Python 3.
101 # TODO deprecate stringio name, as it is a lie on Python 3.
102 stringio = bytesio
102 stringio = bytesio
103 xmlrpclib = pycompat.xmlrpclib
103 xmlrpclib = pycompat.xmlrpclib
104
104
105 httpserver = urllibcompat.httpserver
105 httpserver = urllibcompat.httpserver
106 urlerr = urllibcompat.urlerr
106 urlerr = urllibcompat.urlerr
107 urlreq = urllibcompat.urlreq
107 urlreq = urllibcompat.urlreq
108
108
109 # workaround for win32mbcs
109 # workaround for win32mbcs
110 _filenamebytestr = pycompat.bytestr
110 _filenamebytestr = pycompat.bytestr
111
111
112 if pycompat.iswindows:
112 if pycompat.iswindows:
113 from . import windows as platform
113 from . import windows as platform
114 else:
114 else:
115 from . import posix as platform
115 from . import posix as platform
116
116
117 _ = i18n._
117 _ = i18n._
118
118
119 abspath = platform.abspath
119 abspath = platform.abspath
120 bindunixsocket = platform.bindunixsocket
120 bindunixsocket = platform.bindunixsocket
121 cachestat = platform.cachestat
121 cachestat = platform.cachestat
122 checkexec = platform.checkexec
122 checkexec = platform.checkexec
123 checklink = platform.checklink
123 checklink = platform.checklink
124 copymode = platform.copymode
124 copymode = platform.copymode
125 expandglobs = platform.expandglobs
125 expandglobs = platform.expandglobs
126 getfsmountpoint = platform.getfsmountpoint
126 getfsmountpoint = platform.getfsmountpoint
127 getfstype = platform.getfstype
127 getfstype = platform.getfstype
128 get_password = platform.get_password
128 get_password = platform.get_password
129 groupmembers = platform.groupmembers
129 groupmembers = platform.groupmembers
130 groupname = platform.groupname
130 groupname = platform.groupname
131 isexec = platform.isexec
131 isexec = platform.isexec
132 isowner = platform.isowner
132 isowner = platform.isowner
133 listdir = osutil.listdir
133 listdir = osutil.listdir
134 localpath = platform.localpath
134 localpath = platform.localpath
135 lookupreg = platform.lookupreg
135 lookupreg = platform.lookupreg
136 makedir = platform.makedir
136 makedir = platform.makedir
137 nlinks = platform.nlinks
137 nlinks = platform.nlinks
138 normpath = platform.normpath
138 normpath = platform.normpath
139 normcase = platform.normcase
139 normcase = platform.normcase
140 normcasespec = platform.normcasespec
140 normcasespec = platform.normcasespec
141 normcasefallback = platform.normcasefallback
141 normcasefallback = platform.normcasefallback
142 openhardlinks = platform.openhardlinks
142 openhardlinks = platform.openhardlinks
143 oslink = platform.oslink
143 oslink = platform.oslink
144 parsepatchoutput = platform.parsepatchoutput
144 parsepatchoutput = platform.parsepatchoutput
145 pconvert = platform.pconvert
145 pconvert = platform.pconvert
146 poll = platform.poll
146 poll = platform.poll
147 posixfile = platform.posixfile
147 posixfile = platform.posixfile
148 readlink = platform.readlink
148 readlink = platform.readlink
149 rename = platform.rename
149 rename = platform.rename
150 removedirs = platform.removedirs
150 removedirs = platform.removedirs
151 samedevice = platform.samedevice
151 samedevice = platform.samedevice
152 samefile = platform.samefile
152 samefile = platform.samefile
153 samestat = platform.samestat
153 samestat = platform.samestat
154 setflags = platform.setflags
154 setflags = platform.setflags
155 split = platform.split
155 split = platform.split
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 statisexec = platform.statisexec
157 statisexec = platform.statisexec
158 statislink = platform.statislink
158 statislink = platform.statislink
159 umask = platform.umask
159 umask = platform.umask
160 unlink = platform.unlink
160 unlink = platform.unlink
161 username = platform.username
161 username = platform.username
162
162
163
163
164 if typing.TYPE_CHECKING:
164 if typing.TYPE_CHECKING:
165 _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
165 _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
166
166
167
167
def setumask(val: int) -> None:
    """Update the process umask (used by the chg server).

    This is a no-op on Windows, where the umask concept does not apply.
    The module-level ``umask`` cache and the platform module's copy are
    kept in sync with the new value.
    """
    if pycompat.iswindows:
        return
    global umask
    os.umask(val)
    umask = val & 0o777
    platform.umask = umask
175
175
176
176
177 # small compat layer
177 # small compat layer
178 compengines = compression.compengines
178 compengines = compression.compengines
179 SERVERROLE = compression.SERVERROLE
179 SERVERROLE = compression.SERVERROLE
180 CLIENTROLE = compression.CLIENTROLE
180 CLIENTROLE = compression.CLIENTROLE
181
181
182 # Python compatibility
182 # Python compatibility
183
183
184 _notset = object()
184 _notset = object()
185
185
186
186
def bitsfrom(container):
    """Return the bitwise OR of every value in *container*.

    An empty container yields 0.
    """
    combined = 0
    for flag in container:
        combined |= flag
    return combined
192
192
193
193
# python 2.6 still have deprecation warning enabled by default. We do not want
# to display anything to standard user so detect if we are running test and
# only use python deprecation warning in this case.
# NOTE: HGEMITWARNINGS is set by the test runner, so _dowarn is effectively
# "are we running the test suite".
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
if _dowarn:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )
223
223
224
224
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if not _dowarn:
        return
    suffix = (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(
        pycompat.sysstr(msg + suffix), DeprecationWarning, stacklevel + 1
    )
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
238
238
239
239
# Supported digest names mapped to their constructors; note sha1 comes from
# hashutil rather than hashlib directly.
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# Sanity check: every digest in the preference list must be supported.
# (NB: this loop leaves ``k`` bound at module level.)
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
250
250
251
251
class digester:
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        # digest name -> live hash object; abort early on unknown names so a
        # typo cannot silently drop a requested digest
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed ``data`` to every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest computed so far for ``key``.

        Raises error.Abort for digest types not supported at all.
        """
        if key not in DIGESTS:
            # BUG FIX: this previously interpolated the stale module-level
            # loop variable ``k`` instead of ``key``, so the error message
            # reported the wrong digest name.
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        # iterate over the names of the digests being computed
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
298
298
299
299
class digestchecker:
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, tracking bytes seen and digests."""
        data = self._fh.read(length)
        self._got += len(data)
        self._digester.update(data)
        return data

    def validate(self):
        """Abort unless the observed size and every digest match."""
        if self._got != self._size:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (name, expected, actual)
                )
335
335
336
336
try:
    buffer = buffer  # pytype: disable=name-error
except NameError:
    # Python 3 has no builtin ``buffer``; emulate it with a read-only
    # memoryview slice of the given object.

    def buffer(sliceable, offset=0, length=None):
        """Return a read-only zero-copy view of ``sliceable``.

        The view starts at ``offset`` and spans ``length`` bytes, or runs to
        the end when ``length`` is None.
        """
        end = None if length is None else offset + length
        return memoryview(sliceable)[offset:end].toreadonly()
347
347
348
348
349 _chunksize = 4096
349 _chunksize = 4096
350
350
351
351
class bufferedinputpipe:
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        # pending chunks of bytes, oldest first
        self._buffer = []
        # set once os.read() returns b'' (end of stream reached)
        self._eof = False
        # total number of buffered bytes (sum of len() over _buffer)
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped file object
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Return up to ``size`` bytes, blocking until enough data or EOF."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        """Return up to ``size`` bytes after at most one os.read() call.

        Unlike read(), this does not loop to satisfy ``size`` in full, which
        keeps it usable alongside polling.
        """
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi = index of the first newline within the newest chunk (-1: none)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            # keep pulling data until a newline shows up or we hit EOF; only
            # the newest chunk needs scanning (older chunks were checked)
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            # collapse all chunks into one before slicing
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            # leftover bytes stay buffered as a single collapsed chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        # raw os.read keeps this usable with select/poll (no stdio buffering)
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
460
460
461
461
def has_mmap_populate():
    """Return True when mmapread() can pre-populate mapped pages.

    Pre-population is available either through osutil's background populate
    helper or through the platform's ``mmap.MAP_POPULATE`` flag.
    """
    if hasattr(osutil, "background_mmap_populate"):
        return True
    return hasattr(mmap, 'MAP_POPULATE')
466
466
467
467
def mmapread(fp, size=None, pre_populate=True):
    """Read a file content using mmap

    The responsability of checking the file system is mmap safe is the
    responsability of the caller (see `vfs.is_mmap_safe`).

    In some case, a normal string might be returned.

    If `pre_populate` is True (the default), the mmapped data will be
    pre-populated in memory if the system support this option, this slow down
    the initial mmaping but avoid potentially crippling page fault on later
    access. If this is not the desired behavior, set `pre_populate` to False.
    """
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    elif size is None:
        size = 0
    # accept either a file object or a raw file descriptor
    fd = getattr(fp, 'fileno', lambda: fp)()

    if pycompat.iswindows:
        # Windows' mmap.mmap() takes ``access`` instead of ``flags``/``prot``
        # (which don't exist there); ACCESS_READ is the read-only equivalent.
        # There is no MAP_POPULATE analogue, so pre_populate is a no-op.
        # (Was an E731 lambda assignment; a def matches the posix branch.)
        def _mmap(fd, size) -> mmap.mmap:
            return mmap.mmap(fd, size, access=mmap.ACCESS_READ)

    else:
        flags = mmap.MAP_PRIVATE
        bg_populate = hasattr(osutil, "background_mmap_populate")

        if pre_populate and not bg_populate:
            # No background helper available: ask the kernel to fault the
            # pages in up-front where MAP_POPULATE exists (0 elsewhere).
            flags |= getattr(mmap, 'MAP_POPULATE', 0)

        def _mmap(fd, size) -> mmap.mmap:
            m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
            if pre_populate and bg_populate:
                osutil.background_mmap_populate(m)
            return m

    try:
        return _mmap(fd, size)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
503
512
504
513
505 class fileobjectproxy:
514 class fileobjectproxy:
506 """A proxy around file objects that tells a watcher when events occur.
515 """A proxy around file objects that tells a watcher when events occur.
507
516
508 This type is intended to only be used for testing purposes. Think hard
517 This type is intended to only be used for testing purposes. Think hard
509 before using it in important code.
518 before using it in important code.
510 """
519 """
511
520
512 __slots__ = (
521 __slots__ = (
513 '_orig',
522 '_orig',
514 '_observer',
523 '_observer',
515 )
524 )
516
525
517 def __init__(self, fh, observer):
526 def __init__(self, fh, observer):
518 object.__setattr__(self, '_orig', fh)
527 object.__setattr__(self, '_orig', fh)
519 object.__setattr__(self, '_observer', observer)
528 object.__setattr__(self, '_observer', observer)
520
529
521 def __getattribute__(self, name):
530 def __getattribute__(self, name):
522 ours = {
531 ours = {
523 '_observer',
532 '_observer',
524 # IOBase
533 # IOBase
525 'close',
534 'close',
526 # closed if a property
535 # closed if a property
527 'fileno',
536 'fileno',
528 'flush',
537 'flush',
529 'isatty',
538 'isatty',
530 'readable',
539 'readable',
531 'readline',
540 'readline',
532 'readlines',
541 'readlines',
533 'seek',
542 'seek',
534 'seekable',
543 'seekable',
535 'tell',
544 'tell',
536 'truncate',
545 'truncate',
537 'writable',
546 'writable',
538 'writelines',
547 'writelines',
539 # RawIOBase
548 # RawIOBase
540 'read',
549 'read',
541 'readall',
550 'readall',
542 'readinto',
551 'readinto',
543 'write',
552 'write',
544 # BufferedIOBase
553 # BufferedIOBase
545 # raw is a property
554 # raw is a property
546 'detach',
555 'detach',
547 # read defined above
556 # read defined above
548 'read1',
557 'read1',
549 # readinto defined above
558 # readinto defined above
550 # write defined above
559 # write defined above
551 }
560 }
552
561
553 # We only observe some methods.
562 # We only observe some methods.
554 if name in ours:
563 if name in ours:
555 return object.__getattribute__(self, name)
564 return object.__getattribute__(self, name)
556
565
557 return getattr(object.__getattribute__(self, '_orig'), name)
566 return getattr(object.__getattribute__(self, '_orig'), name)
558
567
559 def __nonzero__(self):
568 def __nonzero__(self):
560 return bool(object.__getattribute__(self, '_orig'))
569 return bool(object.__getattribute__(self, '_orig'))
561
570
562 __bool__ = __nonzero__
571 __bool__ = __nonzero__
563
572
564 def __delattr__(self, name):
573 def __delattr__(self, name):
565 return delattr(object.__getattribute__(self, '_orig'), name)
574 return delattr(object.__getattribute__(self, '_orig'), name)
566
575
567 def __setattr__(self, name, value):
576 def __setattr__(self, name, value):
568 return setattr(object.__getattribute__(self, '_orig'), name, value)
577 return setattr(object.__getattribute__(self, '_orig'), name, value)
569
578
570 def __iter__(self):
579 def __iter__(self):
571 return object.__getattribute__(self, '_orig').__iter__()
580 return object.__getattribute__(self, '_orig').__iter__()
572
581
573 def _observedcall(self, name, *args, **kwargs):
582 def _observedcall(self, name, *args, **kwargs):
574 # Call the original object.
583 # Call the original object.
575 orig = object.__getattribute__(self, '_orig')
584 orig = object.__getattribute__(self, '_orig')
576 res = getattr(orig, name)(*args, **kwargs)
585 res = getattr(orig, name)(*args, **kwargs)
577
586
578 # Call a method on the observer of the same name with arguments
587 # Call a method on the observer of the same name with arguments
579 # so it can react, log, etc.
588 # so it can react, log, etc.
580 observer = object.__getattribute__(self, '_observer')
589 observer = object.__getattribute__(self, '_observer')
581 fn = getattr(observer, name, None)
590 fn = getattr(observer, name, None)
582 if fn:
591 if fn:
583 fn(res, *args, **kwargs)
592 fn(res, *args, **kwargs)
584
593
585 return res
594 return res
586
595
587 def close(self, *args, **kwargs):
596 def close(self, *args, **kwargs):
588 return object.__getattribute__(self, '_observedcall')(
597 return object.__getattribute__(self, '_observedcall')(
589 'close', *args, **kwargs
598 'close', *args, **kwargs
590 )
599 )
591
600
592 def fileno(self, *args, **kwargs):
601 def fileno(self, *args, **kwargs):
593 return object.__getattribute__(self, '_observedcall')(
602 return object.__getattribute__(self, '_observedcall')(
594 'fileno', *args, **kwargs
603 'fileno', *args, **kwargs
595 )
604 )
596
605
597 def flush(self, *args, **kwargs):
606 def flush(self, *args, **kwargs):
598 return object.__getattribute__(self, '_observedcall')(
607 return object.__getattribute__(self, '_observedcall')(
599 'flush', *args, **kwargs
608 'flush', *args, **kwargs
600 )
609 )
601
610
602 def isatty(self, *args, **kwargs):
611 def isatty(self, *args, **kwargs):
603 return object.__getattribute__(self, '_observedcall')(
612 return object.__getattribute__(self, '_observedcall')(
604 'isatty', *args, **kwargs
613 'isatty', *args, **kwargs
605 )
614 )
606
615
607 def readable(self, *args, **kwargs):
616 def readable(self, *args, **kwargs):
608 return object.__getattribute__(self, '_observedcall')(
617 return object.__getattribute__(self, '_observedcall')(
609 'readable', *args, **kwargs
618 'readable', *args, **kwargs
610 )
619 )
611
620
612 def readline(self, *args, **kwargs):
621 def readline(self, *args, **kwargs):
613 return object.__getattribute__(self, '_observedcall')(
622 return object.__getattribute__(self, '_observedcall')(
614 'readline', *args, **kwargs
623 'readline', *args, **kwargs
615 )
624 )
616
625
617 def readlines(self, *args, **kwargs):
626 def readlines(self, *args, **kwargs):
618 return object.__getattribute__(self, '_observedcall')(
627 return object.__getattribute__(self, '_observedcall')(
619 'readlines', *args, **kwargs
628 'readlines', *args, **kwargs
620 )
629 )
621
630
622 def seek(self, *args, **kwargs):
631 def seek(self, *args, **kwargs):
623 return object.__getattribute__(self, '_observedcall')(
632 return object.__getattribute__(self, '_observedcall')(
624 'seek', *args, **kwargs
633 'seek', *args, **kwargs
625 )
634 )
626
635
627 def seekable(self, *args, **kwargs):
636 def seekable(self, *args, **kwargs):
628 return object.__getattribute__(self, '_observedcall')(
637 return object.__getattribute__(self, '_observedcall')(
629 'seekable', *args, **kwargs
638 'seekable', *args, **kwargs
630 )
639 )
631
640
632 def tell(self, *args, **kwargs):
641 def tell(self, *args, **kwargs):
633 return object.__getattribute__(self, '_observedcall')(
642 return object.__getattribute__(self, '_observedcall')(
634 'tell', *args, **kwargs
643 'tell', *args, **kwargs
635 )
644 )
636
645
637 def truncate(self, *args, **kwargs):
646 def truncate(self, *args, **kwargs):
638 return object.__getattribute__(self, '_observedcall')(
647 return object.__getattribute__(self, '_observedcall')(
639 'truncate', *args, **kwargs
648 'truncate', *args, **kwargs
640 )
649 )
641
650
642 def writable(self, *args, **kwargs):
651 def writable(self, *args, **kwargs):
643 return object.__getattribute__(self, '_observedcall')(
652 return object.__getattribute__(self, '_observedcall')(
644 'writable', *args, **kwargs
653 'writable', *args, **kwargs
645 )
654 )
646
655
647 def writelines(self, *args, **kwargs):
656 def writelines(self, *args, **kwargs):
648 return object.__getattribute__(self, '_observedcall')(
657 return object.__getattribute__(self, '_observedcall')(
649 'writelines', *args, **kwargs
658 'writelines', *args, **kwargs
650 )
659 )
651
660
652 def read(self, *args, **kwargs):
661 def read(self, *args, **kwargs):
653 return object.__getattribute__(self, '_observedcall')(
662 return object.__getattribute__(self, '_observedcall')(
654 'read', *args, **kwargs
663 'read', *args, **kwargs
655 )
664 )
656
665
657 def readall(self, *args, **kwargs):
666 def readall(self, *args, **kwargs):
658 return object.__getattribute__(self, '_observedcall')(
667 return object.__getattribute__(self, '_observedcall')(
659 'readall', *args, **kwargs
668 'readall', *args, **kwargs
660 )
669 )
661
670
662 def readinto(self, *args, **kwargs):
671 def readinto(self, *args, **kwargs):
663 return object.__getattribute__(self, '_observedcall')(
672 return object.__getattribute__(self, '_observedcall')(
664 'readinto', *args, **kwargs
673 'readinto', *args, **kwargs
665 )
674 )
666
675
667 def write(self, *args, **kwargs):
676 def write(self, *args, **kwargs):
668 return object.__getattribute__(self, '_observedcall')(
677 return object.__getattribute__(self, '_observedcall')(
669 'write', *args, **kwargs
678 'write', *args, **kwargs
670 )
679 )
671
680
672 def detach(self, *args, **kwargs):
681 def detach(self, *args, **kwargs):
673 return object.__getattribute__(self, '_observedcall')(
682 return object.__getattribute__(self, '_observedcall')(
674 'detach', *args, **kwargs
683 'detach', *args, **kwargs
675 )
684 )
676
685
677 def read1(self, *args, **kwargs):
686 def read1(self, *args, **kwargs):
678 return object.__getattribute__(self, '_observedcall')(
687 return object.__getattribute__(self, '_observedcall')(
679 'read1', *args, **kwargs
688 'read1', *args, **kwargs
680 )
689 )
681
690
682
691
683 class observedbufferedinputpipe(bufferedinputpipe):
692 class observedbufferedinputpipe(bufferedinputpipe):
684 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
693 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
685
694
686 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
695 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
687 bypass ``fileobjectproxy``. Because of this, we need to make
696 bypass ``fileobjectproxy``. Because of this, we need to make
688 ``bufferedinputpipe`` aware of these operations.
697 ``bufferedinputpipe`` aware of these operations.
689
698
690 This variation of ``bufferedinputpipe`` can notify observers about
699 This variation of ``bufferedinputpipe`` can notify observers about
691 ``os.read()`` events. It also re-publishes other events, such as
700 ``os.read()`` events. It also re-publishes other events, such as
692 ``read()`` and ``readline()``.
701 ``read()`` and ``readline()``.
693 """
702 """
694
703
695 def _fillbuffer(self, size=_chunksize):
704 def _fillbuffer(self, size=_chunksize):
696 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
705 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
697
706
698 fn = getattr(self._input._observer, 'osread', None)
707 fn = getattr(self._input._observer, 'osread', None)
699 if fn:
708 if fn:
700 fn(res, size)
709 fn(res, size)
701
710
702 return res
711 return res
703
712
704 # We use different observer methods because the operation isn't
713 # We use different observer methods because the operation isn't
705 # performed on the actual file object but on us.
714 # performed on the actual file object but on us.
706 def read(self, size):
715 def read(self, size):
707 res = super(observedbufferedinputpipe, self).read(size)
716 res = super(observedbufferedinputpipe, self).read(size)
708
717
709 fn = getattr(self._input._observer, 'bufferedread', None)
718 fn = getattr(self._input._observer, 'bufferedread', None)
710 if fn:
719 if fn:
711 fn(res, size)
720 fn(res, size)
712
721
713 return res
722 return res
714
723
715 def readline(self, *args, **kwargs):
724 def readline(self, *args, **kwargs):
716 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
725 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
717
726
718 fn = getattr(self._input._observer, 'bufferedreadline', None)
727 fn = getattr(self._input._observer, 'bufferedreadline', None)
719 if fn:
728 if fn:
720 fn(res)
729 fn(res)
721
730
722 return res
731 return res
723
732
724
733
725 PROXIED_SOCKET_METHODS = {
734 PROXIED_SOCKET_METHODS = {
726 'makefile',
735 'makefile',
727 'recv',
736 'recv',
728 'recvfrom',
737 'recvfrom',
729 'recvfrom_into',
738 'recvfrom_into',
730 'recv_into',
739 'recv_into',
731 'send',
740 'send',
732 'sendall',
741 'sendall',
733 'sendto',
742 'sendto',
734 'setblocking',
743 'setblocking',
735 'settimeout',
744 'settimeout',
736 'gettimeout',
745 'gettimeout',
737 'setsockopt',
746 'setsockopt',
738 }
747 }
739
748
740
749
741 class socketproxy:
750 class socketproxy:
742 """A proxy around a socket that tells a watcher when events occur.
751 """A proxy around a socket that tells a watcher when events occur.
743
752
744 This is like ``fileobjectproxy`` except for sockets.
753 This is like ``fileobjectproxy`` except for sockets.
745
754
746 This type is intended to only be used for testing purposes. Think hard
755 This type is intended to only be used for testing purposes. Think hard
747 before using it in important code.
756 before using it in important code.
748 """
757 """
749
758
750 __slots__ = (
759 __slots__ = (
751 '_orig',
760 '_orig',
752 '_observer',
761 '_observer',
753 )
762 )
754
763
755 def __init__(self, sock, observer):
764 def __init__(self, sock, observer):
756 object.__setattr__(self, '_orig', sock)
765 object.__setattr__(self, '_orig', sock)
757 object.__setattr__(self, '_observer', observer)
766 object.__setattr__(self, '_observer', observer)
758
767
759 def __getattribute__(self, name):
768 def __getattribute__(self, name):
760 if name in PROXIED_SOCKET_METHODS:
769 if name in PROXIED_SOCKET_METHODS:
761 return object.__getattribute__(self, name)
770 return object.__getattribute__(self, name)
762
771
763 return getattr(object.__getattribute__(self, '_orig'), name)
772 return getattr(object.__getattribute__(self, '_orig'), name)
764
773
765 def __delattr__(self, name):
774 def __delattr__(self, name):
766 return delattr(object.__getattribute__(self, '_orig'), name)
775 return delattr(object.__getattribute__(self, '_orig'), name)
767
776
768 def __setattr__(self, name, value):
777 def __setattr__(self, name, value):
769 return setattr(object.__getattribute__(self, '_orig'), name, value)
778 return setattr(object.__getattribute__(self, '_orig'), name, value)
770
779
771 def __nonzero__(self):
780 def __nonzero__(self):
772 return bool(object.__getattribute__(self, '_orig'))
781 return bool(object.__getattribute__(self, '_orig'))
773
782
774 __bool__ = __nonzero__
783 __bool__ = __nonzero__
775
784
776 def _observedcall(self, name, *args, **kwargs):
785 def _observedcall(self, name, *args, **kwargs):
777 # Call the original object.
786 # Call the original object.
778 orig = object.__getattribute__(self, '_orig')
787 orig = object.__getattribute__(self, '_orig')
779 res = getattr(orig, name)(*args, **kwargs)
788 res = getattr(orig, name)(*args, **kwargs)
780
789
781 # Call a method on the observer of the same name with arguments
790 # Call a method on the observer of the same name with arguments
782 # so it can react, log, etc.
791 # so it can react, log, etc.
783 observer = object.__getattribute__(self, '_observer')
792 observer = object.__getattribute__(self, '_observer')
784 fn = getattr(observer, name, None)
793 fn = getattr(observer, name, None)
785 if fn:
794 if fn:
786 fn(res, *args, **kwargs)
795 fn(res, *args, **kwargs)
787
796
788 return res
797 return res
789
798
790 def makefile(self, *args, **kwargs):
799 def makefile(self, *args, **kwargs):
791 res = object.__getattribute__(self, '_observedcall')(
800 res = object.__getattribute__(self, '_observedcall')(
792 'makefile', *args, **kwargs
801 'makefile', *args, **kwargs
793 )
802 )
794
803
795 # The file object may be used for I/O. So we turn it into a
804 # The file object may be used for I/O. So we turn it into a
796 # proxy using our observer.
805 # proxy using our observer.
797 observer = object.__getattribute__(self, '_observer')
806 observer = object.__getattribute__(self, '_observer')
798 return makeloggingfileobject(
807 return makeloggingfileobject(
799 observer.fh,
808 observer.fh,
800 res,
809 res,
801 observer.name,
810 observer.name,
802 reads=observer.reads,
811 reads=observer.reads,
803 writes=observer.writes,
812 writes=observer.writes,
804 logdata=observer.logdata,
813 logdata=observer.logdata,
805 logdataapis=observer.logdataapis,
814 logdataapis=observer.logdataapis,
806 )
815 )
807
816
808 def recv(self, *args, **kwargs):
817 def recv(self, *args, **kwargs):
809 return object.__getattribute__(self, '_observedcall')(
818 return object.__getattribute__(self, '_observedcall')(
810 'recv', *args, **kwargs
819 'recv', *args, **kwargs
811 )
820 )
812
821
813 def recvfrom(self, *args, **kwargs):
822 def recvfrom(self, *args, **kwargs):
814 return object.__getattribute__(self, '_observedcall')(
823 return object.__getattribute__(self, '_observedcall')(
815 'recvfrom', *args, **kwargs
824 'recvfrom', *args, **kwargs
816 )
825 )
817
826
818 def recvfrom_into(self, *args, **kwargs):
827 def recvfrom_into(self, *args, **kwargs):
819 return object.__getattribute__(self, '_observedcall')(
828 return object.__getattribute__(self, '_observedcall')(
820 'recvfrom_into', *args, **kwargs
829 'recvfrom_into', *args, **kwargs
821 )
830 )
822
831
823 def recv_into(self, *args, **kwargs):
832 def recv_into(self, *args, **kwargs):
824 return object.__getattribute__(self, '_observedcall')(
833 return object.__getattribute__(self, '_observedcall')(
825 'recv_info', *args, **kwargs
834 'recv_info', *args, **kwargs
826 )
835 )
827
836
828 def send(self, *args, **kwargs):
837 def send(self, *args, **kwargs):
829 return object.__getattribute__(self, '_observedcall')(
838 return object.__getattribute__(self, '_observedcall')(
830 'send', *args, **kwargs
839 'send', *args, **kwargs
831 )
840 )
832
841
833 def sendall(self, *args, **kwargs):
842 def sendall(self, *args, **kwargs):
834 return object.__getattribute__(self, '_observedcall')(
843 return object.__getattribute__(self, '_observedcall')(
835 'sendall', *args, **kwargs
844 'sendall', *args, **kwargs
836 )
845 )
837
846
838 def sendto(self, *args, **kwargs):
847 def sendto(self, *args, **kwargs):
839 return object.__getattribute__(self, '_observedcall')(
848 return object.__getattribute__(self, '_observedcall')(
840 'sendto', *args, **kwargs
849 'sendto', *args, **kwargs
841 )
850 )
842
851
843 def setblocking(self, *args, **kwargs):
852 def setblocking(self, *args, **kwargs):
844 return object.__getattribute__(self, '_observedcall')(
853 return object.__getattribute__(self, '_observedcall')(
845 'setblocking', *args, **kwargs
854 'setblocking', *args, **kwargs
846 )
855 )
847
856
848 def settimeout(self, *args, **kwargs):
857 def settimeout(self, *args, **kwargs):
849 return object.__getattribute__(self, '_observedcall')(
858 return object.__getattribute__(self, '_observedcall')(
850 'settimeout', *args, **kwargs
859 'settimeout', *args, **kwargs
851 )
860 )
852
861
853 def gettimeout(self, *args, **kwargs):
862 def gettimeout(self, *args, **kwargs):
854 return object.__getattribute__(self, '_observedcall')(
863 return object.__getattribute__(self, '_observedcall')(
855 'gettimeout', *args, **kwargs
864 'gettimeout', *args, **kwargs
856 )
865 )
857
866
858 def setsockopt(self, *args, **kwargs):
867 def setsockopt(self, *args, **kwargs):
859 return object.__getattribute__(self, '_observedcall')(
868 return object.__getattribute__(self, '_observedcall')(
860 'setsockopt', *args, **kwargs
869 'setsockopt', *args, **kwargs
861 )
870 )
862
871
863
872
864 class baseproxyobserver:
873 class baseproxyobserver:
865 def __init__(self, fh, name, logdata, logdataapis):
874 def __init__(self, fh, name, logdata, logdataapis):
866 self.fh = fh
875 self.fh = fh
867 self.name = name
876 self.name = name
868 self.logdata = logdata
877 self.logdata = logdata
869 self.logdataapis = logdataapis
878 self.logdataapis = logdataapis
870
879
871 def _writedata(self, data):
880 def _writedata(self, data):
872 if not self.logdata:
881 if not self.logdata:
873 if self.logdataapis:
882 if self.logdataapis:
874 self.fh.write(b'\n')
883 self.fh.write(b'\n')
875 self.fh.flush()
884 self.fh.flush()
876 return
885 return
877
886
878 # Simple case writes all data on a single line.
887 # Simple case writes all data on a single line.
879 if b'\n' not in data:
888 if b'\n' not in data:
880 if self.logdataapis:
889 if self.logdataapis:
881 self.fh.write(b': %s\n' % stringutil.escapestr(data))
890 self.fh.write(b': %s\n' % stringutil.escapestr(data))
882 else:
891 else:
883 self.fh.write(
892 self.fh.write(
884 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
893 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
885 )
894 )
886 self.fh.flush()
895 self.fh.flush()
887 return
896 return
888
897
889 # Data with newlines is written to multiple lines.
898 # Data with newlines is written to multiple lines.
890 if self.logdataapis:
899 if self.logdataapis:
891 self.fh.write(b':\n')
900 self.fh.write(b':\n')
892
901
893 lines = data.splitlines(True)
902 lines = data.splitlines(True)
894 for line in lines:
903 for line in lines:
895 self.fh.write(
904 self.fh.write(
896 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
905 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
897 )
906 )
898 self.fh.flush()
907 self.fh.flush()
899
908
900
909
901 class fileobjectobserver(baseproxyobserver):
910 class fileobjectobserver(baseproxyobserver):
902 """Logs file object activity."""
911 """Logs file object activity."""
903
912
904 def __init__(
913 def __init__(
905 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
914 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
906 ):
915 ):
907 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
916 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
908 self.reads = reads
917 self.reads = reads
909 self.writes = writes
918 self.writes = writes
910
919
911 def read(self, res, size=-1):
920 def read(self, res, size=-1):
912 if not self.reads:
921 if not self.reads:
913 return
922 return
914 # Python 3 can return None from reads at EOF instead of empty strings.
923 # Python 3 can return None from reads at EOF instead of empty strings.
915 if res is None:
924 if res is None:
916 res = b''
925 res = b''
917
926
918 if size == -1 and res == b'':
927 if size == -1 and res == b'':
919 # Suppress pointless read(-1) calls that return
928 # Suppress pointless read(-1) calls that return
920 # nothing. These happen _a lot_ on Python 3, and there
929 # nothing. These happen _a lot_ on Python 3, and there
921 # doesn't seem to be a better workaround to have matching
930 # doesn't seem to be a better workaround to have matching
922 # Python 2 and 3 behavior. :(
931 # Python 2 and 3 behavior. :(
923 return
932 return
924
933
925 if self.logdataapis:
934 if self.logdataapis:
926 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
935 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
927
936
928 self._writedata(res)
937 self._writedata(res)
929
938
930 def readline(self, res, limit=-1):
939 def readline(self, res, limit=-1):
931 if not self.reads:
940 if not self.reads:
932 return
941 return
933
942
934 if self.logdataapis:
943 if self.logdataapis:
935 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
944 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
936
945
937 self._writedata(res)
946 self._writedata(res)
938
947
939 def readinto(self, res, dest):
948 def readinto(self, res, dest):
940 if not self.reads:
949 if not self.reads:
941 return
950 return
942
951
943 if self.logdataapis:
952 if self.logdataapis:
944 self.fh.write(
953 self.fh.write(
945 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
954 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
946 )
955 )
947
956
948 data = dest[0:res] if res is not None else b''
957 data = dest[0:res] if res is not None else b''
949
958
950 # _writedata() uses "in" operator and is confused by memoryview because
959 # _writedata() uses "in" operator and is confused by memoryview because
951 # characters are ints on Python 3.
960 # characters are ints on Python 3.
952 if isinstance(data, memoryview):
961 if isinstance(data, memoryview):
953 data = data.tobytes()
962 data = data.tobytes()
954
963
955 self._writedata(data)
964 self._writedata(data)
956
965
957 def write(self, res, data):
966 def write(self, res, data):
958 if not self.writes:
967 if not self.writes:
959 return
968 return
960
969
961 # Python 2 returns None from some write() calls. Python 3 (reasonably)
970 # Python 2 returns None from some write() calls. Python 3 (reasonably)
962 # returns the integer bytes written.
971 # returns the integer bytes written.
963 if res is None and data:
972 if res is None and data:
964 res = len(data)
973 res = len(data)
965
974
966 if self.logdataapis:
975 if self.logdataapis:
967 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
976 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
968
977
969 self._writedata(data)
978 self._writedata(data)
970
979
971 def flush(self, res):
980 def flush(self, res):
972 if not self.writes:
981 if not self.writes:
973 return
982 return
974
983
975 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
984 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
976
985
977 # For observedbufferedinputpipe.
986 # For observedbufferedinputpipe.
978 def bufferedread(self, res, size):
987 def bufferedread(self, res, size):
979 if not self.reads:
988 if not self.reads:
980 return
989 return
981
990
982 if self.logdataapis:
991 if self.logdataapis:
983 self.fh.write(
992 self.fh.write(
984 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
993 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
985 )
994 )
986
995
987 self._writedata(res)
996 self._writedata(res)
988
997
989 def bufferedreadline(self, res):
998 def bufferedreadline(self, res):
990 if not self.reads:
999 if not self.reads:
991 return
1000 return
992
1001
993 if self.logdataapis:
1002 if self.logdataapis:
994 self.fh.write(
1003 self.fh.write(
995 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
1004 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
996 )
1005 )
997
1006
998 self._writedata(res)
1007 self._writedata(res)
999
1008
1000
1009
1001 def makeloggingfileobject(
1010 def makeloggingfileobject(
1002 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
1011 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
1003 ):
1012 ):
1004 """Turn a file object into a logging file object."""
1013 """Turn a file object into a logging file object."""
1005
1014
1006 observer = fileobjectobserver(
1015 observer = fileobjectobserver(
1007 logh,
1016 logh,
1008 name,
1017 name,
1009 reads=reads,
1018 reads=reads,
1010 writes=writes,
1019 writes=writes,
1011 logdata=logdata,
1020 logdata=logdata,
1012 logdataapis=logdataapis,
1021 logdataapis=logdataapis,
1013 )
1022 )
1014 return fileobjectproxy(fh, observer)
1023 return fileobjectproxy(fh, observer)
1015
1024
1016
1025
1017 class socketobserver(baseproxyobserver):
1026 class socketobserver(baseproxyobserver):
1018 """Logs socket activity."""
1027 """Logs socket activity."""
1019
1028
1020 def __init__(
1029 def __init__(
1021 self,
1030 self,
1022 fh,
1031 fh,
1023 name,
1032 name,
1024 reads=True,
1033 reads=True,
1025 writes=True,
1034 writes=True,
1026 states=True,
1035 states=True,
1027 logdata=False,
1036 logdata=False,
1028 logdataapis=True,
1037 logdataapis=True,
1029 ):
1038 ):
1030 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
1039 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
1031 self.reads = reads
1040 self.reads = reads
1032 self.writes = writes
1041 self.writes = writes
1033 self.states = states
1042 self.states = states
1034
1043
1035 def makefile(self, res, mode=None, bufsize=None):
1044 def makefile(self, res, mode=None, bufsize=None):
1036 if not self.states:
1045 if not self.states:
1037 return
1046 return
1038
1047
1039 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
1048 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
1040
1049
1041 def recv(self, res, size, flags=0):
1050 def recv(self, res, size, flags=0):
1042 if not self.reads:
1051 if not self.reads:
1043 return
1052 return
1044
1053
1045 if self.logdataapis:
1054 if self.logdataapis:
1046 self.fh.write(
1055 self.fh.write(
1047 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1056 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1048 )
1057 )
1049 self._writedata(res)
1058 self._writedata(res)
1050
1059
1051 def recvfrom(self, res, size, flags=0):
1060 def recvfrom(self, res, size, flags=0):
1052 if not self.reads:
1061 if not self.reads:
1053 return
1062 return
1054
1063
1055 if self.logdataapis:
1064 if self.logdataapis:
1056 self.fh.write(
1065 self.fh.write(
1057 b'%s> recvfrom(%d, %d) -> %d'
1066 b'%s> recvfrom(%d, %d) -> %d'
1058 % (self.name, size, flags, len(res[0]))
1067 % (self.name, size, flags, len(res[0]))
1059 )
1068 )
1060
1069
1061 self._writedata(res[0])
1070 self._writedata(res[0])
1062
1071
1063 def recvfrom_into(self, res, buf, size, flags=0):
1072 def recvfrom_into(self, res, buf, size, flags=0):
1064 if not self.reads:
1073 if not self.reads:
1065 return
1074 return
1066
1075
1067 if self.logdataapis:
1076 if self.logdataapis:
1068 self.fh.write(
1077 self.fh.write(
1069 b'%s> recvfrom_into(%d, %d) -> %d'
1078 b'%s> recvfrom_into(%d, %d) -> %d'
1070 % (self.name, size, flags, res[0])
1079 % (self.name, size, flags, res[0])
1071 )
1080 )
1072
1081
1073 self._writedata(buf[0 : res[0]])
1082 self._writedata(buf[0 : res[0]])
1074
1083
1075 def recv_into(self, res, buf, size=0, flags=0):
1084 def recv_into(self, res, buf, size=0, flags=0):
1076 if not self.reads:
1085 if not self.reads:
1077 return
1086 return
1078
1087
1079 if self.logdataapis:
1088 if self.logdataapis:
1080 self.fh.write(
1089 self.fh.write(
1081 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1090 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1082 )
1091 )
1083
1092
1084 self._writedata(buf[0:res])
1093 self._writedata(buf[0:res])
1085
1094
1086 def send(self, res, data, flags=0):
1095 def send(self, res, data, flags=0):
1087 if not self.writes:
1096 if not self.writes:
1088 return
1097 return
1089
1098
1090 self.fh.write(
1099 self.fh.write(
1091 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1100 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1092 )
1101 )
1093 self._writedata(data)
1102 self._writedata(data)
1094
1103
1095 def sendall(self, res, data, flags=0):
1104 def sendall(self, res, data, flags=0):
1096 if not self.writes:
1105 if not self.writes:
1097 return
1106 return
1098
1107
1099 if self.logdataapis:
1108 if self.logdataapis:
1100 # Returns None on success. So don't bother reporting return value.
1109 # Returns None on success. So don't bother reporting return value.
1101 self.fh.write(
1110 self.fh.write(
1102 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1111 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1103 )
1112 )
1104
1113
1105 self._writedata(data)
1114 self._writedata(data)
1106
1115
1107 def sendto(self, res, data, flagsoraddress, address=None):
1116 def sendto(self, res, data, flagsoraddress, address=None):
1108 if not self.writes:
1117 if not self.writes:
1109 return
1118 return
1110
1119
1111 if address:
1120 if address:
1112 flags = flagsoraddress
1121 flags = flagsoraddress
1113 else:
1122 else:
1114 flags = 0
1123 flags = 0
1115
1124
1116 if self.logdataapis:
1125 if self.logdataapis:
1117 self.fh.write(
1126 self.fh.write(
1118 b'%s> sendto(%d, %d, %r) -> %d'
1127 b'%s> sendto(%d, %d, %r) -> %d'
1119 % (self.name, len(data), flags, address, res)
1128 % (self.name, len(data), flags, address, res)
1120 )
1129 )
1121
1130
1122 self._writedata(data)
1131 self._writedata(data)
1123
1132
1124 def setblocking(self, res, flag):
1133 def setblocking(self, res, flag):
1125 if not self.states:
1134 if not self.states:
1126 return
1135 return
1127
1136
1128 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1137 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1129
1138
1130 def settimeout(self, res, value):
1139 def settimeout(self, res, value):
1131 if not self.states:
1140 if not self.states:
1132 return
1141 return
1133
1142
1134 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1143 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1135
1144
1136 def gettimeout(self, res):
1145 def gettimeout(self, res):
1137 if not self.states:
1146 if not self.states:
1138 return
1147 return
1139
1148
1140 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1149 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1141
1150
1142 def setsockopt(self, res, level, optname, value):
1151 def setsockopt(self, res, level, optname, value):
1143 if not self.states:
1152 if not self.states:
1144 return
1153 return
1145
1154
1146 self.fh.write(
1155 self.fh.write(
1147 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1156 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1148 % (self.name, level, optname, value, res)
1157 % (self.name, level, optname, value, res)
1149 )
1158 )
1150
1159
1151
1160
1152 def makeloggingsocket(
1161 def makeloggingsocket(
1153 logh,
1162 logh,
1154 fh,
1163 fh,
1155 name,
1164 name,
1156 reads=True,
1165 reads=True,
1157 writes=True,
1166 writes=True,
1158 states=True,
1167 states=True,
1159 logdata=False,
1168 logdata=False,
1160 logdataapis=True,
1169 logdataapis=True,
1161 ):
1170 ):
1162 """Turn a socket into a logging socket."""
1171 """Turn a socket into a logging socket."""
1163
1172
1164 observer = socketobserver(
1173 observer = socketobserver(
1165 logh,
1174 logh,
1166 name,
1175 name,
1167 reads=reads,
1176 reads=reads,
1168 writes=writes,
1177 writes=writes,
1169 states=states,
1178 states=states,
1170 logdata=logdata,
1179 logdata=logdata,
1171 logdataapis=logdataapis,
1180 logdataapis=logdataapis,
1172 )
1181 )
1173 return socketproxy(fh, observer)
1182 return socketproxy(fh, observer)
1174
1183
1175
1184
1176 def version():
1185 def version():
1177 """Return version information if available."""
1186 """Return version information if available."""
1178 try:
1187 try:
1179 from . import __version__ # pytype: disable=import-error
1188 from . import __version__ # pytype: disable=import-error
1180
1189
1181 return __version__.version
1190 return __version__.version
1182 except ImportError:
1191 except ImportError:
1183 return b'unknown'
1192 return b'unknown'
1184
1193
1185
1194
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    # Up to three dotted numeric components, then everything after an
    # optional '+'/'-' separator is the "extra" suffix.
    match = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not match:
        vparts, extra = b'', v
    elif match.group(2):
        vparts, extra = match.groups()
    else:
        vparts, extra = match.group(1), None

    assert vparts is not None  # help pytype

    parts = []
    for piece in vparts.split(b'.'):
        try:
            parts.append(int(piece))
        except ValueError:
            break
    # Pad so that e.g. (3, 6) becomes (3, 6, None).
    parts.extend([None] * (3 - len(parts)))

    if n == 2:
        return (parts[0], parts[1])
    if n == 3:
        return (parts[0], parts[1], parts[2])
    if n == 4:
        return (parts[0], parts[1], parts[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)
1271
1280
1272
1281
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    if func.__code__.co_argcount == 0:
        # Zero-argument case: a one-slot list doubles as the "computed yet?"
        # flag and the storage.
        memo = []

        def f():
            if not memo:
                memo.append(func())
            return memo[0]

        return f

    results = {}
    if func.__code__.co_argcount == 1:
        # Single-argument fast path: key on the argument itself instead of
        # packing/unpacking an args tuple.
        def f(arg):
            try:
                return results[arg]
            except KeyError:
                value = results[arg] = func(arg)
                return value

    else:

        def f(*args):
            try:
                return results[args]
            except KeyError:
                value = results[args] = func(*args)
                return value

    return f
1302
1311
1303
1312
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        pending = getattr(self, '_copied', 0)
        if pending:
            self._copied = pending - 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        # Just count outstanding shared references; the real copy is
        # deferred until someone calls preparewrite().
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1322
1331
1323
1332
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> list(d2.items())
    [('a', 0), ('b', 1)]
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> list(d1.items())
    [('a', 0), ('a.5', 0.5), ('b', 1)]
    """

    def __setitem__(self, key, value):
        # Re-setting an existing key moves it to the end ("last-set" order).
        if key in self:
            del self[key]
        super().__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        """Insert ``key``/``value`` so it ends up at index ``position``."""
        for idx, (curkey, curval) in enumerate(list(self.items())):
            if idx == position:
                self[key] = value
            if idx >= position:
                # Re-append each displaced entry to preserve relative order.
                del self[curkey]
                self[curkey] = curval
1361
1370
1362
1371
# All behavior is inherited: dict supplies storage, and cow.preparewrite()
# produces the copy via self.__class__(self), i.e. dict's copy constructor.
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """
1388
1397
1389
1398
# As with cowdict, behavior is fully inherited; cow.preparewrite() copies
# via self.__class__(self), which sortdict (an OrderedDict) accepts.
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """
1395
1404
1396
1405
class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager.

    A clean ``with`` body results in close() followed by release(); an
    exception skips close(), so release() aborts the transaction.
    """

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # Body failed: never commit, just release (abort).
            self.release()
            return
        try:
            self.close()
        finally:
            # release() runs even if close() itself raised.
            self.release()
1422
1431
1423
1432
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # Commit (rather than abort) so work done before the intervention
        # point is preserved, then re-raise for the caller to handle.
        tr.close()
        raise
    finally:
        # Always release; if close() was never reached, this aborts.
        tr.release()
1441
1450
1442
1451
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """No-op context manager: yields ``enter_result`` and does nothing else
    (equivalent to contextlib.nullcontext)."""
    yield enter_result
1446
1455
1447
1456
class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # A fresh node is its own neighbor: a one-element circular list.
        self.prev = self
        self.next = self
        # Start out empty (key is the _notset sentinel).
        self.markempty()

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1470
1479
1471
1480
class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new code. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        # ``max`` is the entry-count capacity; ``maxcost`` (0 = unlimited)
        # bounds the sum of per-entry costs.
        self._cache = {}

        # The linked list starts with a single (empty) node and grows
        # lazily up to ``max`` nodes via _addcapacity().
        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup is an access: promote the node to newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        """Remove ``k`` and return its value.

        Raises KeyError (like dict.pop) unless a ``default`` is supplied.
        """
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        # Unlike peek(), this promotes the entry on a hit.
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        # Empty every node in place; the linked list (and thus capacity)
        # is retained for reuse.
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        while n.key is _notset:
            n = n.prev

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node: _lrucachenode):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self) -> _lrucachenode:
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1764
1773
1765
1774
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # ``order`` tracks recency (oldest at the left); at most 21 entries
    # are kept before the oldest is evicted.
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg in cache:
                # Refresh recency for a hit.
                order.remove(arg)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args in cache:
                order.remove(args)
            else:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            order.append(args)
            return cache[args]

    return f
1795
1804
1796
1805
class propertycache:
    """Descriptor turning a method into a lazily computed, cached attribute.

    The first access runs ``func`` and stores the result on the instance
    under the same name (via cachevalue), so subsequent accesses hit the
    instance ``__dict__`` and bypass this descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1810
1819
1811
1820
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # propertycache stores under the str name, so normalize before lookup
    key = pycompat.sysstr(prop)
    obj.__dict__.pop(key, None)
1817
1826
1818
1827
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""
    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # double the threshold, but jump further if the chunk we are
            # about to emit is already bigger: take the largest power of
            # two not exceeding its size, capped at max
            min <<= 1
            if pendinglen:
                floorpow2 = 1 << (pendinglen.bit_length() - 1)
            else:
                floorpow2 = 1
            if floorpow2 > min:
                min = floorpow2
            if min > max:
                min = max
        yield b''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield b''.join(pending)
1850
1859
1851
1860
def always(fn):
    """predicate matching any input; counterpart of never()"""
    return True
1854
1863
1855
1864
def never(fn):
    """predicate matching no input; counterpart of always()"""
    return False
1858
1867
1859
1868
def nogc(func=None) -> Any:
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7. But it still affect
    CPython's performance.
    """
    # dual-use: `with nogc():` (no argument) or `@nogc` (decorator form)
    if func is None:
        return _nogc_context()
    return _nogc_decorator(func)
1877
1886
1878
1887
1879 @contextlib.contextmanager
1888 @contextlib.contextmanager
1880 def _nogc_context():
1889 def _nogc_context():
1881 gcenabled = gc.isenabled()
1890 gcenabled = gc.isenabled()
1882 gc.disable()
1891 gc.disable()
1883 try:
1892 try:
1884 yield
1893 yield
1885 finally:
1894 finally:
1886 if gcenabled:
1895 if gcenabled:
1887 gc.enable()
1896 gc.enable()
1888
1897
1889
1898
def _nogc_decorator(func):
    """Wrap *func* so each call runs with the garbage collector disabled."""

    def inner(*args, **kwargs):
        with _nogc_context():
            return func(*args, **kwargs)

    return inner
1896
1905
1897
1906
# On PyPy, replace nogc with an identity function: disabling the GC there
# hurts rather than helps, per the note below.
if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x
1901
1910
1902
1911
def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # different drive letters: no relative path exists between them,
        # so anchor n2 under root instead
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        # make n2 absolute as well so both sides compare component-wise
        n2 = b'/'.join((pconvert(root), n2))
    # strip the common leading components, then climb out of what remains
    # of n1 (one '..' per leftover component) and descend into n2
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1928
1937
1929
1938
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError as e:
            # A TypeError whose traceback is exactly `depth` frames deep
            # was raised by the call itself (bad arguments), not by code
            # running inside func; translate only that case.
            if len(traceback.extract_tb(e.__traceback__)) == depth:
                raise error.SignatureError
            raise

    return check
1942
1951
1943
1952
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1960
1969
1961
1970
def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the old stat so we can detect mtime ambiguity after
            # the copy (see filestat.isambig below)
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
2040
2049
2041
2050
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    hardlink is a tri-state: None means "decide from device numbers";
    the decision propagates down the recursion and is returned so callers
    can reuse it.  Returns (hardlink, number_of_files_copied).
    """
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinking only works within one filesystem/device
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    # give up on hardlinking for the rest of the tree
                    hardlink = False
                # XXX maybe try to relink if the file exist ?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
2085
2094
2086
2095
# Basenames (compared case-insensitively, with any extension stripped) that
# Windows refuses to create in any directory.
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# Characters that may not appear anywhere in a Windows filename.
_winreservedchars = b':*?"<>|'
2112
2121
2113
2122
def checkwinfilename(path: bytes) -> Optional[bytes]:
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # validate each path component individually
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                # control characters are forbidden in Windows filenames
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # reserved device names apply to the basename before the first dot
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        # Windows silently strips trailing dots and spaces; '..' is exempt
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2173
2182
2174
2183
# Prefer the monotonic high-resolution clock when available.
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock  # pytype: disable=module-attr
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
2186
2195
2187
2196
def makelock(info: bytes, pathname: bytes) -> None:
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    # Preferred path: a symlink whose target encodes the lock info.
    # Creation is atomic and fails with EEXIST when the lock already exists.
    try:
        return os.symlink(info, pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall through to the regular-file scheme

    # Fallback: an exclusively-created regular file holding the info bytes.
    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    fd = os.open(pathname, flags)
    try:
        os.write(fd, info)
    finally:
        os.close(fd)
2208
2217
2209
2218
def readlock(pathname: bytes) -> bytes:
    """Read back the info stored by makelock().

    Tries the symlink form first; falls back to reading a regular file
    when the path is not a symlink (EINVAL) or symlinks are unsupported.
    """
    try:
        return readlink(pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
2220
2229
2221
2230
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available; fall back to stat-by-name
        return os.stat(fp.name)
    return os.fstat(fd)
2228
2237
2229
2238
2230 # File system features
2239 # File system features
2231
2240
2232
2241
def fscasesensitive(path: bytes) -> bool:
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name contains nothing foldable; no evidence against sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant does not exist: the filesystem distinguishes case
        return True
    # same inode under both spellings means case-insensitive
    if st2 == st:
        return False
    return True
2255
2264
2256
2265
# Conversion applied to patterns before handing them to re2 (identity by
# default; switched to sysstr for re2 bindings that reject bytes).
_re2_input = lambda x: x
# google-re2 needs to be told not to print errors on its own
_re2_options = None
try:
    import re2  # pytype: disable=import-error

    # None means "importable but not yet verified"; _re._checkre2() resolves it
    _re2 = None
except ImportError:
    _re2 = False
2266
2275
2267
2276
def has_re2():
    """return True if re2 is available, False otherwise"""
    if _re2 is None:
        # probe lazily; _checkre2() settles the module-level _re2 flag
        _re._checkre2()
    return _re2
2273
2282
2274
2283
class _re:
    """Regex facade that transparently prefers the re2 engine.

    Falls back to the stdlib re module when re2 is unavailable, when the
    requested flags are not re2-compatible, or when re2 rejects a pattern.
    """

    @staticmethod
    def _checkre2():
        # Probe the imported re2 binding once and record the result in the
        # module-level _re2 / _re2_input / _re2_options globals.
        global _re2
        global _re2_input
        global _re2_options
        if _re2 is not None:
            # we already have the answer
            return

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accept bytes
            # the `fb-re2` project provides a re2 module that acccept sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr
        try:
            quiet = re2.Options()
            quiet.log_errors = False
            _re2_options = quiet
        except AttributeError:
            # older re2 bindings have no Options object
            pass

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                input_regex = _re2_input(pat)
                if _re2_options is not None:
                    compiled = re2.compile(input_regex, options=_re2_options)
                else:
                    compiled = re2.compile(input_regex)
                return compiled
            except re2.error:
                # pattern not supported by re2; use the stdlib engine
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2344
2353
2345
2354
# Module-level singleton: callers use util.re.compile() / util.re.escape.
re = _re()

# Cache of per-directory listings used by fspath(), keyed by directory path.
_fspathcache = {}
2349
2358
2350
2359
def fspath(name: bytes, root: bytes) -> bytes:
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        # map normcased name -> on-disk name for every entry in `dir`
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): bytes.replace() returns a new object, so this line is a
    # no-op as written; the character class below still matches backslashes
    # because '\\' escapes itself inside [...] -- confirm before "fixing".
    seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
2393
2402
2394
2403
def checknlink(testfile: bytes) -> bool:
    '''check whether hardlink count reporting works properly

    Creates a throwaway pair of hard-linked files next to ``testfile``
    and verifies that the filesystem reports a link count > 1 for them.
    Returns False when hardlinks cannot be created or the count is not
    reported correctly.
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        # derive the link name by swapping the trailing '1~' for '2~'
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # close before unlinking so removal works on Windows too
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2426
2435
2427
2436
def endswithsep(path: bytes) -> bool:
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    # osaltsep may be empty/None on platforms with a single separator
    return bool(altsep and path.endswith(altsep))
2435
2444
2436
2445
def splitpath(path: bytes) -> List[bytes]:
    """Split path by os.sep.

    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need."""
    sep = pycompat.ossep
    return path.split(sep)
2444
2453
2445
2454
def mktempcopy(
    name: bytes,
    emptyok: bool = False,
    createmode: Optional[int] = None,
    enforcewritable: bool = False,
) -> bytes:
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            # original vanished between mkstemp and open: an empty temp
            # file is the right answer, same as emptyok
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a stale temp file behind on any failure
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2492
2501
2493
2502
class filestat:
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """

    def __init__(self, stat: Optional[os.stat_result]) -> None:
        # raw os.stat_result, or None when the path did not exist
        self.stat = stat

    @classmethod
    def frompath(cls: Type[_Tfilestat], path: bytes) -> _Tfilestat:
        """Build a filestat from a path; a missing file yields stat=None."""
        try:
            stat = os.stat(path)
        except FileNotFoundError:
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls: Type[_Tfilestat], fp: BinaryIO) -> _Tfilestat:
        """Build a filestat from an already-open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    # __eq__ is overridden below; keep default identity-based hashing
    __hash__ = object.__hash__

    def __eq__(self, old) -> bool:
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            pass
        try:
            # both sides representing a missing file compare equal;
            # comparing against a non-filestat falls through to False
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old: _Tfilestat) -> bool:
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            # either side has stat=None: nothing to be ambiguous about
            return False

    def avoidambig(self, path: bytes, old: _Tfilestat) -> bool:
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime by one second, clamped into the signed 32-bit range
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except PermissionError:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other) -> bool:
        return not self == other
2594
2603
2595
2604
class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        # 'w' in mode means the caller will rewrite from scratch, so the
        # temp copy can start out empty and must be writable
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.writelines = self._fp.writelines
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Flush the temp copy and atomically rename it over the target."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # only pay for the extra stat calls when checkambig was requested
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Throw away the temp copy without touching the target file."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if hasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, drop the changes if an exception escapes
        if exctype is not None:
            self.discard()
        else:
            self.close()
2665
2674
2666
2675
def tryrmdir(f):
    """Remove directory ``f`` (and newly-empty parents, via removedirs),
    ignoring the cases where it is already gone or not empty."""
    try:
        removedirs(f)
    except OSError as e:
        if e.errno not in (errno.ENOENT, errno.ENOTEMPTY):
            raise
2673
2682
2674
2683
def unlinkpath(
    f: bytes, ignoremissing: bool = False, rmdir: bool = True
) -> None:
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if not rmdir:
        return
    # try removing directories that might now be empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
2689
2698
2690
2699
def tryunlink(f: bytes) -> bool:
    """Attempt to remove a file, ignoring FileNotFoundError.

    Returns False in case the file did not exist, True otherwise.
    """
    try:
        unlink(f)
    except FileNotFoundError:
        return False
    return True
2701
2710
2702
2711
def makedirs(
    name: bytes, mode: Optional[int] = None, notindexed: bool = False
) -> None:
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(abspath(name))
        # stop recursing once dirname() no longer strips a component
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
2732
2741
2733
2742
def readfile(path: bytes) -> bytes:
    """Return the entire binary content of ``path``."""
    # The mode must be a native str: Python 3's built-in open() raises
    # TypeError when given a bytes mode argument (only the path may be
    # bytes).
    with open(path, 'rb') as fp:
        return fp.read()
2737
2746
2738
2747
def writefile(path: bytes, text: bytes) -> None:
    """Write ``text`` to ``path``, replacing any existing content."""
    # The mode must be a native str: Python 3's built-in open() raises
    # TypeError when given a bytes mode argument (only the path may be
    # bytes).
    with open(path, 'wb') as fp:
        fp.write(text)
2742
2751
2743
2752
def appendfile(path: bytes, text: bytes) -> None:
    """Append ``text`` to ``path``, creating the file if needed."""
    # The mode must be a native str: Python 3's built-in open() raises
    # TypeError when given a bytes mode argument (only the path may be
    # bytes).
    with open(path, 'ab') as fp:
        fp.write(text)
2747
2756
2748
2757
class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def _resliced(source):
            # Re-slice oversized chunks (> 1 MiB) into 256 KiB pieces so a
            # single huge chunk never has to be held while partially read.
            for piece in source:
                if len(piece) <= 2**20:
                    yield piece
                    continue
                for start in range(0, len(piece), 2**18):
                    yield piece[start : start + 2**18]

        self.iter = _resliced(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        remaining = l
        pieces = []
        pending = self._queue
        while remaining > 0:
            # refill the queue with roughly 256 KiB of read-ahead
            if not pending:
                budget = 2**18
                for piece in self.iter:
                    pending.append(piece)
                    budget -= len(piece)
                    if budget <= 0:
                        break
                if not pending:
                    break

            # Peek at the head chunk and consume from it in place via
            # self._chunkoffset. This avoids a popleft/appendleft pair and
            # a new bytes object for the remainder on every partial read.
            head = pending[0]
            headlen = len(head)
            offset = self._chunkoffset

            if offset == 0 and remaining >= headlen:
                # whole chunk fits: hand it over untouched
                remaining -= headlen
                pending.popleft()
                pieces.append(head)
                # self._chunkoffset stays 0
            elif remaining >= headlen - offset:
                # the unread tail of a partially-consumed chunk fits
                remaining -= headlen - offset
                pending.popleft()
                # offset > 0 here, so this slice is a genuine copy
                pieces.append(head[offset:])
                self._chunkoffset = 0
            else:
                # only part of the head chunk is needed
                pieces.append(head[offset : offset + remaining])
                self._chunkoffset = offset + remaining
                remaining = 0

        return b''.join(pieces)
2830
2839
2831
2840
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        want = size if limit is None else min(limit, size)
        # a zero request (size=0 or limit exhausted) ends the stream
        # without touching the file
        data = want and f.read(want)
        if not data:
            return
        if limit:
            limit -= len(data)
        yield data
2852
2861
2853
2862
class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        budget = self._left
        if not budget:
            # cap reached: behave like EOF
            return b''

        # a negative n means "read everything still allowed"
        amount = budget if n < 0 else min(n, budget)
        data = self._fh.read(amount)
        self._left = budget - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
2891
2900
2892
2901
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        magnitude = abs(count)
        # pick the first (coarsest) unit whose threshold the count reaches
        for multiplier, divisor, format in unittable:
            if magnitude >= divisor * multiplier:
                return format % (count / float(divisor))
        # nothing matched: fall back to the finest unit
        return unittable[-1][2] % count

    return go
2903
2912
2904
2913
def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    # the emptiness check deliberately comes first so a reversed range is
    # reported as such even when fromline is also out of bounds
    if toline < fromline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline
2925
2934
2926
2935
# Render a byte count with a human-readable unit: the coarsest unit is
# chosen such that at least one integer digit remains, and precision
# shrinks as the leading value grows (e.g. 1.23 GB, 12.3 MB, 123 KB).
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
2939
2948
2940
2949
class transformingwriter(typelib.BinaryIO_Proxy):
    """Writable file proxy that pipes written data through an encoder.

    Each chunk handed to ``write()`` is run through the ``encode``
    callable before being forwarded to the wrapped file object;
    ``flush()`` and ``close()`` are forwarded untouched.
    """

    def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
        self._fp = fp
        self._encode = encode

    def write(self, data: bytes) -> int:
        return self._fp.write(self._encode(data))

    def flush(self) -> None:
        self._fp.flush()

    def close(self) -> None:
        self._fp.close()
2956
2965
2957
2966
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.  Shared by tolf()/tocrlf() below.
_eolre = remod.compile(br'\r*\n')
2962
2971
2963
2972
def tolf(s: bytes) -> bytes:
    """Normalize every EOL in *s* (CRLF, including repeated CRs before
    the LF) to a bare LF."""
    return _eolre.sub(b'\n', s)
2966
2975
2967
2976
def tocrlf(s: bytes) -> bytes:
    """Normalize every EOL in *s* (LF, or CR-runs followed by LF) to
    CRLF."""
    return _eolre.sub(b'\r\n', s)
2970
2979
2971
2980
def _crlfwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
    """Wrap *fp* so that all data written to it gets CRLF line endings."""
    return transformingwriter(fp, tocrlf)
2974
2983
2975
2984
# Bind the EOL-conversion helpers that match the host platform's native
# line separator: on CRLF platforms (Windows) text is actually converted,
# everywhere else the helpers are identity/pass-through functions.
if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
2984
2993
if typing.TYPE_CHECKING:
    # Replace the various overloads that come along with aliasing other methods
    # with the narrow definition that we care about in the type checking phase
    # only. This ensures that both Windows and POSIX see only the definition
    # that is actually available.  These stubs are never executed at runtime.

    def tonativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def fromnativeeol(s: bytes) -> bytes:
        raise NotImplementedError

    def nativeeolwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
        raise NotImplementedError
2999
3008
3000
3009
# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
    """Return *fp* unchanged (historical Python 2 compatibility shim)."""
    return fp
3004
3013
3005
3014
def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
    """Yield each individual line contained in a stream of byte chunks.

    Line boundaries never span chunks: each chunk is split on its own.
    """
    for chunk in iterator:
        yield from chunk.splitlines()
3010
3019
3011
3020
def expandpath(path: bytes) -> bytes:
    """Expand environment variables, then a leading ``~``, in *path*."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
3014
3023
3015
3024
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        # Let a doubled prefix stand for a literal prefix character
        # ("$$" -> "$").  Work on a copy so the caller's mapping is not
        # mutated as a side effect of this call.
        mapping = dict(mapping)
        patterns += b'|' + prefix
        if len(prefix) > 1:
            # prefix is regex-escaped ("\$"); the literal char is the tail
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    # x.group() includes the prefix character; strip it to look up the key
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3040
3049
3041
3050
# Human-readable duration formatter built via unitcountfn(); input is a
# number of seconds.  Entries are (multiplier, divisor, format) triples
# tried in order, so larger units and coarser precisions come first.
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
3057
3066
3058
3067
@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # b'<unknown>' until the context has exited and filled in `elapsed`
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
3076
3085
3077
3086
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        # record the duration even when the body raised, then unwind the
        # nesting counter
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


# module-level reentrancy-depth counter read/updated by timedcm() above
timedcm._nested = 0
3099
3108
3100
3109
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        # indent by nesting level so nested timed calls read as a tree
        indent = b' ' * time_stats.level * 2
        name = pycompat.bytestr(func.__name__)
        procutil.stderr.write(b'%s%s: %s\n' % (indent, name, time_stats))
        return result

    return wrapper
3127
3136
3128
3137
# (suffix, multiplier) pairs tried in order by sizetoint(); matching is
# done with endswith(), so the single-letter b'b' fallback must come last.
_sizeunits = (
    (b'm', 2**20),
    (b'k', 2**10),
    (b'g', 2**30),
    (b'kb', 2**10),
    (b'mb', 2**20),
    (b'gb', 2**30),
    (b'b', 1),
)


def sizetoint(s: bytes) -> int:
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
3158
3167
3159
3168
class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # list of (source, callable) pairs; sorted lazily on invocation
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort in place so registration order never leaks into call order
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
3177
3186
3178
3187
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
    length of longest filepath+line number,
    filepath+linenumber,
    function

    Not be used in production code but very convenient while developing.
    """
    # extract_stack() lists frames outermost-first; drop the innermost
    # `skip` frames plus this function's own frame, then keep the `depth`
    # innermost remaining entries.  depth == 0 keeps everything, because
    # [-0:] slices the whole list.
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
    ][-depth:]
    if entries:
        # width of the widest file:line column, for aligned output
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
3202
3211
3203
3212
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    if otherf:
        # flush the other stream (typically stdout) first so its output
        # is not interleaved with the trace written to f
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # skip + 1 also hides this function's own frame from the trace
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()


# convenient shortcut
dst = debugstacktrace
3228
3237
3229
3238
3230 def safename(f, tag, ctx, others=None):
3239 def safename(f, tag, ctx, others=None):
3231 """
3240 """
3232 Generate a name that it is safe to rename f to in the given context.
3241 Generate a name that it is safe to rename f to in the given context.
3233
3242
3234 f: filename to rename
3243 f: filename to rename
3235 tag: a string tag that will be included in the new name
3244 tag: a string tag that will be included in the new name
3236 ctx: a context, in which the new name must not exist
3245 ctx: a context, in which the new name must not exist
3237 others: a set of other filenames that the new name must not be in
3246 others: a set of other filenames that the new name must not be in
3238
3247
3239 Returns a file name of the form oldname~tag[~number] which does not exist
3248 Returns a file name of the form oldname~tag[~number] which does not exist
3240 in the provided context and is not in the set of other names.
3249 in the provided context and is not in the set of other names.
3241 """
3250 """
3242 if others is None:
3251 if others is None:
3243 others = set()
3252 others = set()
3244
3253
3245 fn = b'%s~%s' % (f, tag)
3254 fn = b'%s~%s' % (f, tag)
3246 if fn not in ctx and fn not in others:
3255 if fn not in ctx and fn not in others:
3247 return fn
3256 return fn
3248 for n in itertools.count(1):
3257 for n in itertools.count(1):
3249 fn = b'%s~%s~%s' % (f, tag, n)
3258 fn = b'%s~%s~%s' % (f, tag, n)
3250 if fn not in ctx and fn not in others:
3259 if fn not in ctx and fn not in others:
3251 return fn
3260 return fn
3252
3261
3253
3262
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    if len(data) == n:
        return data
    raise error.Abort(
        _(b"stream ended unexpectedly (got %d bytes, expected %d)")
        % (len(data), n)
    )
3263
3272
3264
3273
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    b'\\x00'
    >>> uvarintencode(1)
    b'\\x01'
    >>> uvarintencode(127)
    b'\\x7f'
    >>> uvarintencode(1337)
    b'\\xb9\\n'
    >>> uvarintencode(65536)
    b'\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
    ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    # Emit 7 bits at a time, least significant group first, with the
    # continuation bit (0x80) set on every byte except the last.  Using a
    # bytearray avoids the old py2-era pattern of joining 1-byte strings
    # (which also shadowed the `bytes` builtin).
    buf = bytearray()
    while True:
        group = value & 0x7F
        value >>= 7
        if value:
            buf.append(0x80 | group)
        else:
            buf.append(group)
            return bytes(buf)
3300
3309
3301
3310
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
    ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    value = 0
    shift = 0
    while True:
        # each byte carries 7 payload bits; bit 0x80 means "more follows"
        byte = ord(readexactly(fh, 1))
        value |= (byte & 0x7F) << shift
        if byte < 0x80:
            return value
        shift += 7
3331
3340
3332
3341
3333 # Passing the '' locale means that the locale should be set according to the
3342 # Passing the '' locale means that the locale should be set according to the
3334 # user settings (environment variables).
3343 # user settings (environment variables).
3335 # Python sometimes avoids setting the global locale settings. When interfacing
3344 # Python sometimes avoids setting the global locale settings. When interfacing
3336 # with C code (e.g. the curses module or the Subversion bindings), the global
3345 # with C code (e.g. the curses module or the Subversion bindings), the global
3337 # locale settings must be initialized correctly. Python 2 does not initialize
3346 # locale settings must be initialized correctly. Python 2 does not initialize
3338 # the global locale settings on interpreter startup. Python 3 sometimes
3347 # the global locale settings on interpreter startup. Python 3 sometimes
3339 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3348 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3340 # explicitly initialize it to get consistent behavior if it's not already
3349 # explicitly initialize it to get consistent behavior if it's not already
3341 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3350 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3342 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3351 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3343 # if we can remove this code.
3352 # if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    """Temporarily initialize LC_CTYPE from the environment.

    If the global LC_CTYPE is still the interpreter default ('C'), set it
    from the user's environment for the duration of the context and
    restore it afterwards; otherwise leave the locale alone.
    """
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc != 'C':
        # already initialized by someone else; do not touch it
        yield
        return
    try:
        # The environment's locale may be unknown to the C library, in
        # which case we silently keep the 'C' locale.
        with contextlib.suppress(locale.Error):
            locale.setlocale(locale.LC_CTYPE, '')
        yield
    finally:
        locale.setlocale(locale.LC_CTYPE, oldloc)
3360
3369
3361
3370
def _estimatememory() -> Optional[int]:
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # Structure/byref/sizeof/windll live in the top-level ctypes package;
        # ctypes.wintypes does not re-export them on Python 3, so importing
        # them from there raised ImportError and broke this branch.
        # noinspection PyPep8Naming
        from ctypes import (  # pytype: disable=import-error
            Structure,
            byref,
            c_long as DWORD,
            c_ulonglong as DWORDLONG,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
General Comments 0
You need to be logged in to leave comments. Login now