mmap: populate the mapping by default...
marmoute
r52574:522b4d72 default
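
The change: mmapread() grows a `pre_populate` argument, defaulting to True,
which adds MAP_POPULATE to the mmap() flags where the platform provides it.
The kernel then faults the whole mapping in at mmap() time instead of lazily
on first access, trading a slower initial mapping for fewer page faults
later. As a minimal, standalone sketch of the same pattern (Unix-only, since
Windows mmap takes no flags/prot arguments; the helper name read_with_mmap
is hypothetical, not part of this commit):

import mmap
import os


def read_with_mmap(path, pre_populate=True):
    """Map a whole file read-only, optionally pre-faulting its pages."""
    fd = os.open(path, os.O_RDONLY)
    try:
        flags = mmap.MAP_PRIVATE
        if pre_populate:
            # MAP_POPULATE is Linux-only (and missing from older Python
            # builds), so degrade gracefully to a plain lazy mapping.
            flags |= getattr(mmap, 'MAP_POPULATE', 0)
        # length=0 maps the entire file; as with util.mmapread(), an
        # empty file makes mmap.mmap() raise ValueError.
        return mmap.mmap(fd, 0, flags=flags, prot=mmap.PROT_READ)
    finally:
        # A POSIX mapping stays valid after its descriptor is closed.
        os.close(fd)

The same getattr() fallback is what keeps the new default harmless on
platforms without MAP_POPULATE, as the hunk below shows.
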
@@ -1,3346 +1,3355 @@
 # util.py - Mercurial utility functions and platform specific implementations
 #
 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
 #
 # This software may be used and distributed according to the terms of the
 # GNU General Public License version 2 or any later version.

 """Mercurial utility functions and platform specific implementations.

 This contains helper routines that are independent of the SCM core and
 hide platform-specific details from the core.
 """


 import abc
 import collections
 import contextlib
 import errno
 import gc
 import hashlib
 import io
 import itertools
 import locale
 import mmap
 import os
 import pickle  # provides util.pickle symbol
 import re as remod
 import shutil
 import stat
 import sys
 import time
 import traceback
 import warnings

 from typing import (
     Any,
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
 )

 from .node import hex
 from .thirdparty import attr
 from .pycompat import (
     open,
 )
 from hgdemandimport import tracing
 from . import (
     encoding,
     error,
     i18n,
     policy,
     pycompat,
     urllibcompat,
 )
 from .utils import (
     compression,
     hashutil,
     procutil,
     stringutil,
 )

 # keeps pyflakes happy
 assert [
     Iterable,
     Iterator,
     List,
     Optional,
     Tuple,
 ]


 base85 = policy.importmod('base85')
 osutil = policy.importmod('osutil')

 b85decode = base85.b85decode
 b85encode = base85.b85encode

 cookielib = pycompat.cookielib
 httplib = pycompat.httplib
 safehasattr = pycompat.safehasattr
 socketserver = pycompat.socketserver
 bytesio = io.BytesIO
 # TODO deprecate stringio name, as it is a lie on Python 3.
 stringio = bytesio
 xmlrpclib = pycompat.xmlrpclib

 httpserver = urllibcompat.httpserver
 urlerr = urllibcompat.urlerr
 urlreq = urllibcompat.urlreq

 # workaround for win32mbcs
 _filenamebytestr = pycompat.bytestr

 if pycompat.iswindows:
     from . import windows as platform
 else:
     from . import posix as platform

 _ = i18n._

 abspath = platform.abspath
 bindunixsocket = platform.bindunixsocket
 cachestat = platform.cachestat
 checkexec = platform.checkexec
 checklink = platform.checklink
 copymode = platform.copymode
 expandglobs = platform.expandglobs
 getfsmountpoint = platform.getfsmountpoint
 getfstype = platform.getfstype
 get_password = platform.get_password
 groupmembers = platform.groupmembers
 groupname = platform.groupname
 isexec = platform.isexec
 isowner = platform.isowner
 listdir = osutil.listdir
 localpath = platform.localpath
 lookupreg = platform.lookupreg
 makedir = platform.makedir
 nlinks = platform.nlinks
 normpath = platform.normpath
 normcase = platform.normcase
 normcasespec = platform.normcasespec
 normcasefallback = platform.normcasefallback
 openhardlinks = platform.openhardlinks
 oslink = platform.oslink
 parsepatchoutput = platform.parsepatchoutput
 pconvert = platform.pconvert
 poll = platform.poll
 posixfile = platform.posixfile
 readlink = platform.readlink
 rename = platform.rename
 removedirs = platform.removedirs
 samedevice = platform.samedevice
 samefile = platform.samefile
 samestat = platform.samestat
 setflags = platform.setflags
 split = platform.split
 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
 statisexec = platform.statisexec
 statislink = platform.statislink
 umask = platform.umask
 unlink = platform.unlink
 username = platform.username


 def setumask(val: int) -> None:
     '''updates the umask. used by chg server'''
     if pycompat.iswindows:
         return
     os.umask(val)
     global umask
     platform.umask = umask = val & 0o777


 # small compat layer
 compengines = compression.compengines
 SERVERROLE = compression.SERVERROLE
 CLIENTROLE = compression.CLIENTROLE

 # Python compatibility

 _notset = object()


 def bitsfrom(container):
     bits = 0
     for bit in container:
         bits |= bit
     return bits


 # python 2.6 still has deprecation warnings enabled by default. We do not want
 # to display anything to the standard user so we detect if we are running tests
 # and only use python deprecation warnings in this case.
 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
 if _dowarn:
     # explicitly unfilter our warning for python 2.7
     #
     # The option of setting PYTHONWARNINGS in the test runner was investigated.
     # However, module name set through PYTHONWARNINGS was exactly matched, so
     # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
     # makes the whole PYTHONWARNINGS thing useless for our usecase.
     warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
     warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
     warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
 if _dowarn:
     # silence warning emitted by passing user string to re.sub()
     warnings.filterwarnings(
         'ignore', 'bad escape', DeprecationWarning, 'mercurial'
     )
     warnings.filterwarnings(
         'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
     )
     # TODO: reinvent imp.is_frozen()
     warnings.filterwarnings(
         'ignore',
         'the imp module is deprecated',
         DeprecationWarning,
         'mercurial',
     )


 def nouideprecwarn(msg, version, stacklevel=1):
     """Issue a python native deprecation warning

     This is a noop outside of tests, use 'ui.deprecwarn' when possible.
     """
     if _dowarn:
         msg += (
             b"\n(compatibility will be dropped after Mercurial-%s,"
             b" update your code.)"
         ) % version
         warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
         # on python 3 with chg, we will need to explicitly flush the output
         sys.stderr.flush()


 DIGESTS = {
     b'md5': hashlib.md5,
     b'sha1': hashutil.sha1,
     b'sha512': hashlib.sha512,
 }
 # List of digest types from strongest to weakest
 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

 for k in DIGESTS_BY_STRENGTH:
     assert k in DIGESTS


 class digester:
     """helper to compute digests.

     This helper can be used to compute one or more digests given their name.

     >>> d = digester([b'md5', b'sha1'])
     >>> d.update(b'foo')
     >>> [k for k in sorted(d)]
     ['md5', 'sha1']
     >>> d[b'md5']
     'acbd18db4cc2f85cedef654fccc4a4d8'
     >>> d[b'sha1']
     '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
     >>> digester.preferred([b'md5', b'sha1'])
     'sha1'
     """

     def __init__(self, digests, s=b''):
         self._hashes = {}
         for k in digests:
             if k not in DIGESTS:
                 raise error.Abort(_(b'unknown digest type: %s') % k)
             self._hashes[k] = DIGESTS[k]()
         if s:
             self.update(s)

     def update(self, data):
         for h in self._hashes.values():
             h.update(data)

     def __getitem__(self, key):
         if key not in DIGESTS:
             raise error.Abort(_(b'unknown digest type: %s') % key)
         return hex(self._hashes[key].digest())

     def __iter__(self):
         return iter(self._hashes)

     @staticmethod
     def preferred(supported):
         """returns the strongest digest type in both supported and DIGESTS."""

         for k in DIGESTS_BY_STRENGTH:
             if k in supported:
                 return k
         return None


 class digestchecker:
     """file handle wrapper that additionally checks content against a given
     size and digests.

     d = digestchecker(fh, size, {'md5': '...'})

     When multiple digests are given, all of them are validated.
     """

     def __init__(self, fh, size, digests):
         self._fh = fh
         self._size = size
         self._got = 0
         self._digests = dict(digests)
         self._digester = digester(self._digests.keys())

     def read(self, length=-1):
         content = self._fh.read(length)
         self._digester.update(content)
         self._got += len(content)
         return content

     def validate(self):
         if self._size != self._got:
             raise error.Abort(
                 _(b'size mismatch: expected %d, got %d')
                 % (self._size, self._got)
             )
         for k, v in self._digests.items():
             if v != self._digester[k]:
                 # i18n: first parameter is a digest name
                 raise error.Abort(
                     _(b'%s mismatch: expected %s, got %s')
                     % (k, v, self._digester[k])
                 )


 try:
     buffer = buffer  # pytype: disable=name-error
 except NameError:

     def buffer(sliceable, offset=0, length=None):
         if length is not None:
             return memoryview(sliceable)[offset : offset + length]
         return memoryview(sliceable)[offset:]


 _chunksize = 4096


 class bufferedinputpipe:
     """a manually buffered input pipe

     Python will not let us use buffered IO and lazy reading with 'polling' at
     the same time. We cannot probe the buffer state and select will not detect
     that data are ready to read if they are already buffered.

     This class lets us work around that by implementing its own buffering
     (allowing efficient readline) while offering a way to know if the buffer is
     empty from the output (allowing collaboration of the buffer with polling).

     This class lives in the 'util' module because it makes use of the 'os'
     module from the python stdlib.
     """

     def __new__(cls, fh):
         # If we receive a fileobjectproxy, we need to use a variation of this
         # class that notifies observers about activity.
         if isinstance(fh, fileobjectproxy):
             cls = observedbufferedinputpipe

         return super(bufferedinputpipe, cls).__new__(cls)

     def __init__(self, input):
         self._input = input
         self._buffer = []
         self._eof = False
         self._lenbuf = 0

     @property
     def hasbuffer(self):
         """True if any data is currently buffered

         This will be used externally as a pre-step for polling IO. If there is
         already data then no polling should be set in place."""
         return bool(self._buffer)

     @property
     def closed(self):
         return self._input.closed

     def fileno(self):
         return self._input.fileno()

     def close(self):
         return self._input.close()

     def read(self, size):
         while (not self._eof) and (self._lenbuf < size):
             self._fillbuffer()
         return self._frombuffer(size)

     def unbufferedread(self, size):
         if not self._eof and self._lenbuf == 0:
             self._fillbuffer(max(size, _chunksize))
         return self._frombuffer(min(self._lenbuf, size))

     def readline(self, *args, **kwargs):
         if len(self._buffer) > 1:
             # this should not happen because both read and readline end with a
             # _frombuffer call that collapses it.
             self._buffer = [b''.join(self._buffer)]
             self._lenbuf = len(self._buffer[0])
         lfi = -1
         if self._buffer:
             lfi = self._buffer[-1].find(b'\n')
         while (not self._eof) and lfi < 0:
             self._fillbuffer()
             if self._buffer:
                 lfi = self._buffer[-1].find(b'\n')
         size = lfi + 1
         if lfi < 0:  # end of file
             size = self._lenbuf
         elif len(self._buffer) > 1:
             # we need to take previous chunks into account
             size += self._lenbuf - len(self._buffer[-1])
         return self._frombuffer(size)

     def _frombuffer(self, size):
         """return at most 'size' data from the buffer

         The data are removed from the buffer."""
         if size == 0 or not self._buffer:
             return b''
         buf = self._buffer[0]
         if len(self._buffer) > 1:
             buf = b''.join(self._buffer)

         data = buf[:size]
         buf = buf[len(data) :]
         if buf:
             self._buffer = [buf]
             self._lenbuf = len(buf)
         else:
             self._buffer = []
             self._lenbuf = 0
         return data

     def _fillbuffer(self, size=_chunksize):
         """read data to the buffer"""
         data = os.read(self._input.fileno(), size)
         if not data:
             self._eof = True
         else:
             self._lenbuf += len(data)
             self._buffer.append(data)

         return data


-def mmapread(fp, size=None):
+def mmapread(fp, size=None, pre_populate=True):
     """Read a file's content using mmap

     The responsibility of checking that the file system is mmap safe is the
-    responsibility of the caller.
+    responsibility of the caller (see `vfs.is_mmap_safe`).

     In some cases, a normal string might be returned.
+
+    If `pre_populate` is True (the default), the mmapped data is pre-populated
+    in memory if the system supports it. This slows down the initial mmapping
+    but avoids potentially crippling page faults on later access. If this is
+    not the desired behavior, set `pre_populate` to False.
     """
     if size == 0:
         # size of 0 to mmap.mmap() means "all data"
         # rather than "zero bytes", so special case that.
         return b''
     elif size is None:
         size = 0
     fd = getattr(fp, 'fileno', lambda: fp)()
+    flags = mmap.MAP_PRIVATE
+    if pre_populate:
+        flags |= getattr(mmap, 'MAP_POPULATE', 0)
     try:
-        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
+        m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
+        return m
     except ValueError:
         # Empty files cannot be mmapped, but mmapread should still work. Check
         # if the file is empty, and if so, return an empty buffer.
         if os.fstat(fd).st_size == 0:
             return b''
         raise


 class fileobjectproxy:
     """A proxy around file objects that tells a watcher when events occur.

     This type is intended to only be used for testing purposes. Think hard
     before using it in important code.
     """

     __slots__ = (
         '_orig',
         '_observer',
     )

     def __init__(self, fh, observer):
         object.__setattr__(self, '_orig', fh)
         object.__setattr__(self, '_observer', observer)

     def __getattribute__(self, name):
         ours = {
             '_observer',
             # IOBase
             'close',
             # closed if a property
             'fileno',
             'flush',
             'isatty',
             'readable',
             'readline',
             'readlines',
             'seek',
             'seekable',
             'tell',
             'truncate',
             'writable',
             'writelines',
             # RawIOBase
             'read',
             'readall',
             'readinto',
             'write',
             # BufferedIOBase
             # raw is a property
             'detach',
             # read defined above
             'read1',
             # readinto defined above
             # write defined above
         }

         # We only observe some methods.
         if name in ours:
             return object.__getattribute__(self, name)

         return getattr(object.__getattribute__(self, '_orig'), name)

     def __nonzero__(self):
         return bool(object.__getattribute__(self, '_orig'))

     __bool__ = __nonzero__

     def __delattr__(self, name):
         return delattr(object.__getattribute__(self, '_orig'), name)

     def __setattr__(self, name, value):
         return setattr(object.__getattribute__(self, '_orig'), name, value)

     def __iter__(self):
         return object.__getattribute__(self, '_orig').__iter__()

     def _observedcall(self, name, *args, **kwargs):
         # Call the original object.
         orig = object.__getattribute__(self, '_orig')
         res = getattr(orig, name)(*args, **kwargs)

         # Call a method on the observer of the same name with arguments
         # so it can react, log, etc.
         observer = object.__getattribute__(self, '_observer')
         fn = getattr(observer, name, None)
         if fn:
             fn(res, *args, **kwargs)

         return res

     def close(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'close', *args, **kwargs
         )

     def fileno(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'fileno', *args, **kwargs
         )

     def flush(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'flush', *args, **kwargs
         )

     def isatty(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'isatty', *args, **kwargs
         )

     def readable(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'readable', *args, **kwargs
         )

     def readline(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'readline', *args, **kwargs
         )

     def readlines(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'readlines', *args, **kwargs
         )

     def seek(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'seek', *args, **kwargs
         )

     def seekable(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'seekable', *args, **kwargs
         )

     def tell(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'tell', *args, **kwargs
         )

     def truncate(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'truncate', *args, **kwargs
         )

     def writable(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'writable', *args, **kwargs
         )

     def writelines(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'writelines', *args, **kwargs
         )

     def read(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'read', *args, **kwargs
         )

     def readall(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'readall', *args, **kwargs
         )

     def readinto(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'readinto', *args, **kwargs
         )

     def write(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'write', *args, **kwargs
         )

     def detach(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'detach', *args, **kwargs
         )

     def read1(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'read1', *args, **kwargs
         )


 class observedbufferedinputpipe(bufferedinputpipe):
     """A variation of bufferedinputpipe that is aware of fileobjectproxy.

     ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
     bypass ``fileobjectproxy``. Because of this, we need to make
     ``bufferedinputpipe`` aware of these operations.

     This variation of ``bufferedinputpipe`` can notify observers about
     ``os.read()`` events. It also re-publishes other events, such as
     ``read()`` and ``readline()``.
     """

     def _fillbuffer(self, size=_chunksize):
         res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)

         fn = getattr(self._input._observer, 'osread', None)
         if fn:
             fn(res, size)

         return res

     # We use different observer methods because the operation isn't
     # performed on the actual file object but on us.
     def read(self, size):
         res = super(observedbufferedinputpipe, self).read(size)

         fn = getattr(self._input._observer, 'bufferedread', None)
         if fn:
             fn(res, size)

         return res

     def readline(self, *args, **kwargs):
         res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

         fn = getattr(self._input._observer, 'bufferedreadline', None)
         if fn:
             fn(res)

         return res


 PROXIED_SOCKET_METHODS = {
     'makefile',
     'recv',
     'recvfrom',
     'recvfrom_into',
     'recv_into',
     'send',
     'sendall',
     'sendto',
     'setblocking',
     'settimeout',
     'gettimeout',
     'setsockopt',
 }


 class socketproxy:
     """A proxy around a socket that tells a watcher when events occur.

     This is like ``fileobjectproxy`` except for sockets.

     This type is intended to only be used for testing purposes. Think hard
     before using it in important code.
     """

     __slots__ = (
         '_orig',
         '_observer',
     )

     def __init__(self, sock, observer):
         object.__setattr__(self, '_orig', sock)
         object.__setattr__(self, '_observer', observer)

     def __getattribute__(self, name):
         if name in PROXIED_SOCKET_METHODS:
             return object.__getattribute__(self, name)

         return getattr(object.__getattribute__(self, '_orig'), name)

     def __delattr__(self, name):
         return delattr(object.__getattribute__(self, '_orig'), name)

     def __setattr__(self, name, value):
         return setattr(object.__getattribute__(self, '_orig'), name, value)

     def __nonzero__(self):
         return bool(object.__getattribute__(self, '_orig'))

     __bool__ = __nonzero__

     def _observedcall(self, name, *args, **kwargs):
         # Call the original object.
         orig = object.__getattribute__(self, '_orig')
         res = getattr(orig, name)(*args, **kwargs)

         # Call a method on the observer of the same name with arguments
         # so it can react, log, etc.
         observer = object.__getattribute__(self, '_observer')
         fn = getattr(observer, name, None)
         if fn:
             fn(res, *args, **kwargs)

         return res

     def makefile(self, *args, **kwargs):
         res = object.__getattribute__(self, '_observedcall')(
             'makefile', *args, **kwargs
         )

         # The file object may be used for I/O. So we turn it into a
         # proxy using our observer.
         observer = object.__getattribute__(self, '_observer')
         return makeloggingfileobject(
             observer.fh,
             res,
             observer.name,
             reads=observer.reads,
             writes=observer.writes,
             logdata=observer.logdata,
             logdataapis=observer.logdataapis,
         )

     def recv(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'recv', *args, **kwargs
         )

     def recvfrom(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'recvfrom', *args, **kwargs
         )

     def recvfrom_into(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'recvfrom_into', *args, **kwargs
         )

     def recv_into(self, *args, **kwargs):
         return object.__getattribute__(self, '_observedcall')(
             'recv_into', *args, **kwargs
789 )
798 )
790
799
791 def send(self, *args, **kwargs):
800 def send(self, *args, **kwargs):
792 return object.__getattribute__(self, '_observedcall')(
801 return object.__getattribute__(self, '_observedcall')(
793 'send', *args, **kwargs
802 'send', *args, **kwargs
794 )
803 )
795
804
796 def sendall(self, *args, **kwargs):
805 def sendall(self, *args, **kwargs):
797 return object.__getattribute__(self, '_observedcall')(
806 return object.__getattribute__(self, '_observedcall')(
798 'sendall', *args, **kwargs
807 'sendall', *args, **kwargs
799 )
808 )
800
809
801 def sendto(self, *args, **kwargs):
810 def sendto(self, *args, **kwargs):
802 return object.__getattribute__(self, '_observedcall')(
811 return object.__getattribute__(self, '_observedcall')(
803 'sendto', *args, **kwargs
812 'sendto', *args, **kwargs
804 )
813 )
805
814
806 def setblocking(self, *args, **kwargs):
815 def setblocking(self, *args, **kwargs):
807 return object.__getattribute__(self, '_observedcall')(
816 return object.__getattribute__(self, '_observedcall')(
808 'setblocking', *args, **kwargs
817 'setblocking', *args, **kwargs
809 )
818 )
810
819
811 def settimeout(self, *args, **kwargs):
820 def settimeout(self, *args, **kwargs):
812 return object.__getattribute__(self, '_observedcall')(
821 return object.__getattribute__(self, '_observedcall')(
813 'settimeout', *args, **kwargs
822 'settimeout', *args, **kwargs
814 )
823 )
815
824
816 def gettimeout(self, *args, **kwargs):
825 def gettimeout(self, *args, **kwargs):
817 return object.__getattribute__(self, '_observedcall')(
826 return object.__getattribute__(self, '_observedcall')(
818 'gettimeout', *args, **kwargs
827 'gettimeout', *args, **kwargs
819 )
828 )
820
829
821 def setsockopt(self, *args, **kwargs):
830 def setsockopt(self, *args, **kwargs):
822 return object.__getattribute__(self, '_observedcall')(
831 return object.__getattribute__(self, '_observedcall')(
823 'setsockopt', *args, **kwargs
832 'setsockopt', *args, **kwargs
824 )
833 )
825
834
826
835
827 class baseproxyobserver:
836 class baseproxyobserver:
828 def __init__(self, fh, name, logdata, logdataapis):
837 def __init__(self, fh, name, logdata, logdataapis):
829 self.fh = fh
838 self.fh = fh
830 self.name = name
839 self.name = name
831 self.logdata = logdata
840 self.logdata = logdata
832 self.logdataapis = logdataapis
841 self.logdataapis = logdataapis
833
842
834 def _writedata(self, data):
843 def _writedata(self, data):
835 if not self.logdata:
844 if not self.logdata:
836 if self.logdataapis:
845 if self.logdataapis:
837 self.fh.write(b'\n')
846 self.fh.write(b'\n')
838 self.fh.flush()
847 self.fh.flush()
839 return
848 return
840
849
841 # Simple case writes all data on a single line.
850 # Simple case writes all data on a single line.
842 if b'\n' not in data:
851 if b'\n' not in data:
843 if self.logdataapis:
852 if self.logdataapis:
844 self.fh.write(b': %s\n' % stringutil.escapestr(data))
853 self.fh.write(b': %s\n' % stringutil.escapestr(data))
845 else:
854 else:
846 self.fh.write(
855 self.fh.write(
847 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
856 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
848 )
857 )
849 self.fh.flush()
858 self.fh.flush()
850 return
859 return
851
860
852 # Data with newlines is written to multiple lines.
861 # Data with newlines is written to multiple lines.
853 if self.logdataapis:
862 if self.logdataapis:
854 self.fh.write(b':\n')
863 self.fh.write(b':\n')
855
864
856 lines = data.splitlines(True)
865 lines = data.splitlines(True)
857 for line in lines:
866 for line in lines:
858 self.fh.write(
867 self.fh.write(
859 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
868 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
860 )
869 )
861 self.fh.flush()
870 self.fh.flush()
862
871
863
872
864 class fileobjectobserver(baseproxyobserver):
873 class fileobjectobserver(baseproxyobserver):
865 """Logs file object activity."""
874 """Logs file object activity."""
866
875
867 def __init__(
876 def __init__(
868 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
877 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
869 ):
878 ):
870 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
879 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
871 self.reads = reads
880 self.reads = reads
872 self.writes = writes
881 self.writes = writes
873
882
874 def read(self, res, size=-1):
883 def read(self, res, size=-1):
875 if not self.reads:
884 if not self.reads:
876 return
885 return
877 # Python 3 can return None from reads at EOF instead of empty strings.
886 # Python 3 can return None from reads at EOF instead of empty strings.
878 if res is None:
887 if res is None:
879 res = b''
888 res = b''
880
889
881 if size == -1 and res == b'':
890 if size == -1 and res == b'':
882 # Suppress pointless read(-1) calls that return
891 # Suppress pointless read(-1) calls that return
883 # nothing. These happen _a lot_ on Python 3, and there
892 # nothing. These happen _a lot_ on Python 3, and there
884 # doesn't seem to be a better workaround to have matching
893 # doesn't seem to be a better workaround to have matching
885 # Python 2 and 3 behavior. :(
894 # Python 2 and 3 behavior. :(
886 return
895 return
887
896
888 if self.logdataapis:
897 if self.logdataapis:
889 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
898 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
890
899
891 self._writedata(res)
900 self._writedata(res)
892
901
893 def readline(self, res, limit=-1):
902 def readline(self, res, limit=-1):
894 if not self.reads:
903 if not self.reads:
895 return
904 return
896
905
897 if self.logdataapis:
906 if self.logdataapis:
898 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
907 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
899
908
900 self._writedata(res)
909 self._writedata(res)
901
910
902 def readinto(self, res, dest):
911 def readinto(self, res, dest):
903 if not self.reads:
912 if not self.reads:
904 return
913 return
905
914
906 if self.logdataapis:
915 if self.logdataapis:
907 self.fh.write(
916 self.fh.write(
908 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
917 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
909 )
918 )
910
919
911 data = dest[0:res] if res is not None else b''
920 data = dest[0:res] if res is not None else b''
912
921
913 # _writedata() uses "in" operator and is confused by memoryview because
922 # _writedata() uses "in" operator and is confused by memoryview because
914 # characters are ints on Python 3.
923 # characters are ints on Python 3.
915 if isinstance(data, memoryview):
924 if isinstance(data, memoryview):
916 data = data.tobytes()
925 data = data.tobytes()
917
926
918 self._writedata(data)
927 self._writedata(data)
919
928
920 def write(self, res, data):
929 def write(self, res, data):
921 if not self.writes:
930 if not self.writes:
922 return
931 return
923
932
924 # Python 2 returns None from some write() calls. Python 3 (reasonably)
933 # Python 2 returns None from some write() calls. Python 3 (reasonably)
925 # returns the integer bytes written.
934 # returns the integer bytes written.
926 if res is None and data:
935 if res is None and data:
927 res = len(data)
936 res = len(data)
928
937
929 if self.logdataapis:
938 if self.logdataapis:
930 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
939 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
931
940
932 self._writedata(data)
941 self._writedata(data)
933
942
934 def flush(self, res):
943 def flush(self, res):
935 if not self.writes:
944 if not self.writes:
936 return
945 return
937
946
938 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
947 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
939
948
940 # For observedbufferedinputpipe.
949 # For observedbufferedinputpipe.
941 def bufferedread(self, res, size):
950 def bufferedread(self, res, size):
942 if not self.reads:
951 if not self.reads:
943 return
952 return
944
953
945 if self.logdataapis:
954 if self.logdataapis:
946 self.fh.write(
955 self.fh.write(
947 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
956 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
948 )
957 )
949
958
950 self._writedata(res)
959 self._writedata(res)
951
960
952 def bufferedreadline(self, res):
961 def bufferedreadline(self, res):
953 if not self.reads:
962 if not self.reads:
954 return
963 return
955
964
956 if self.logdataapis:
965 if self.logdataapis:
957 self.fh.write(
966 self.fh.write(
958 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
967 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
959 )
968 )
960
969
961 self._writedata(res)
970 self._writedata(res)
962
971
963
972
964 def makeloggingfileobject(
973 def makeloggingfileobject(
965 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
974 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
966 ):
975 ):
967 """Turn a file object into a logging file object."""
976 """Turn a file object into a logging file object."""
968
977
969 observer = fileobjectobserver(
978 observer = fileobjectobserver(
970 logh,
979 logh,
971 name,
980 name,
972 reads=reads,
981 reads=reads,
973 writes=writes,
982 writes=writes,
974 logdata=logdata,
983 logdata=logdata,
975 logdataapis=logdataapis,
984 logdataapis=logdataapis,
976 )
985 )
977 return fileobjectproxy(fh, observer)
986 return fileobjectproxy(fh, observer)
978
987
979
988
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )


def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)


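# Illustrative usage sketch (not part of the module): wrap a connected socket
# so every recv/send/state call is traced to a log stream. `sock` and `logfh`
# are hypothetical stand-ins for a real socket and a binary write stream:
#
#   logged = makeloggingsocket(logfh, sock, b'client', logdata=True)
#   logged.sendall(b'ping')    # the observer records the call and the bytes
#   data = logged.recv(4096)   # likewise for received data
#
# The proxy forwards each call to the real socket first; the observer only
# sees the result (`res`), so wrapping does not change socket behavior.

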
def version():
    """Return version information if available."""
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'


def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)


def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f


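# Illustrative usage sketch (not part of the module): cachefunc() memoizes on
# positional arguments, so a pure function is evaluated only once per distinct
# argument tuple. `expensive` is a hypothetical example function:
#
#   @cachefunc
#   def expensive(x, y):
#       return x ** y
#
#   expensive(2, 10)  # computed
#   expensive(2, 10)  # returned from cache; note the cache is unbounded

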
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self


class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> list(d2.items())
    [('a', 0), ('b', 1)]
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> list(d1.items())
    [('a', 0), ('a.5', 0.5), ('b', 1)]
    """

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v


class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """


class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """


class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()


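# Illustrative sketch (hypothetical subclass, not a real Mercurial type):
# deriving from transactional turns a close()/release() pair into a context
# manager. close() runs only when the block exits cleanly; release() always
# runs, aborting the transaction if close() was never reached:
#
#   class demotransaction(transactional):
#       def close(self):
#           pass  # commit the pending changes
#
#       def release(self):
#           pass  # roll back if close() did not happen
#
#   with demotransaction():
#       ...  # any exception here skips close() but still calls release()

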
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns.
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()


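# Illustrative usage sketch (not part of the module): `tr` is a hypothetical
# transaction object (anything with close()/release(), e.g. a transactional
# subclass). The unusual property of the code above is that
# InterventionRequired *closes* the transaction before re-raising, because
# that error means "stop and let the user continue later", not "throw the
# work away":
#
#   with acceptintervention(tr):
#       do_something_that_may_need_user_input()  # hypothetical

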
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    yield enter_result


class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0


class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new node. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        while n.key is _notset:
            n = n.prev

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node: _lrucachenode):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self) -> _lrucachenode:
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev


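# Illustrative usage sketch (not part of the module), showing both capacity-
# and cost-based eviction:
#
#   d = lrucachedict(4, maxcost=1000)
#   d.insert(b'a', b'value-a', cost=600)
#   d.insert(b'b', b'value-b', cost=600)  # total cost 1200 > 1000, so b'a'
#                                         # (the oldest entry) is evicted
#   d[b'b']       # also refreshes b'b' to most-recently-used
#   d.peek(b'b')  # same lookup, but without touching the LRU order

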
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f


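# Illustrative usage sketch (not part of the module): unlike cachefunc(),
# lrucachefunc() keeps only the ~20 most recently used results, so it is safe
# for functions called with many distinct arguments. `normalize` is a
# hypothetical example:
#
#   normalize = lrucachefunc(lambda p: p.replace(b'\\', b'/'))
#   normalize(b'a\\b')  # computed, then cached
#   normalize(b'a\\b')  # served from the cache and marked most recent

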
class propertycache:
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value


def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]


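# Illustrative usage sketch (not part of the module): propertycache computes
# an attribute once and stores the result in the instance __dict__, which
# shadows the descriptor on later reads. `repoish` and the helper it calls
# are hypothetical names:
#
#   class repoish:
#       @propertycache
#       def expensive(self):
#           return compute_something_costly(self)  # hypothetical
#
#   r = repoish()
#   r.expensive                           # runs the function, caches result
#   r.expensive                           # plain attribute read, no call
#   clearcachedproperty(r, b'expensive')  # forces recomputation on next read

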
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield b''.join(buf)


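# Illustrative sketch (not part of the module) of the chunk-size growth:
# feeding many 1 KiB pieces produces output chunks of at least 1 KiB, then
# 2 KiB, 4 KiB, ... capped at 64 KiB, cutting the number of downstream writes
# for large streams:
#
#   gen = increasingchunks(iter([b'x' * 1024] * 200))
#   # successive yields have len() 1024, 2048, 4096, ... up to 65536,
#   # plus one final smaller chunk flushing whatever remains

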
def always(fn):
    return True


def never(fn):
    return False


def nogc(func=None) -> Any:
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7, but it still affects
    CPython's performance.
    """
    if func is None:
        return _nogc_context()
    else:
        return _nogc_decorator(func)


@contextlib.contextmanager
def _nogc_context():
    gcenabled = gc.isenabled()
    gc.disable()
    try:
        yield
    finally:
        if gcenabled:
            gc.enable()


def _nogc_decorator(func):
    def wrapper(*args, **kwargs):
        with _nogc_context():
            return func(*args, **kwargs)

    return wrapper


if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x


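# Illustrative usage sketch (not part of the module): on CPython, nogc() can
# be used either as a decorator or as a context manager; the PyPy override
# above replaces the decorator form with a plain pass-through. `build_index`
# and `entries` are hypothetical names:
#
#   @nogc
#   def build_index(entries):
#       return {e: True for e in entries}
#
#   with nogc():
#       table = {i: [] for i in range(1000000)}

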
def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'


def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check


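# Illustrative usage sketch (not part of the module): the traceback-depth
# trick distinguishes a TypeError raised at the call site (wrong signature,
# one traceback frame) from one raised deeper inside the function, which is
# re-raised unchanged. `cmd`, `ui` and `repo` are hypothetical names:
#
#   def cmd(ui, repo):
#       ...
#
#   checked = checksignature(cmd)
#   checked(ui)  # raises error.SignatureError instead of a bare TypeError

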
# a whitelist of known filesystems where hardlinks work reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}


def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))


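# Illustrative usage sketch (not part of the module): `no_hardlink_cb` lets a
# caller observe that a requested hardlink was downgraded to a real copy,
# e.g. because the destination filesystem is not in _hardlinkfswhitelist.
# `ui` and both paths are hypothetical:
#
#   def note_copy():
#       ui.debug(b'hardlinking not possible, copying instead\n')
#
#   copyfile(b'source.i', b'backup.i', hardlink=True,
#            no_hardlink_cb=note_copy)

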
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exists?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num


_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
_winreservedchars = b':*?"<>|'


def checkwinfilename(path: bytes) -> Optional[bytes]:
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )


timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock  # pytype: disable=module-attr
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time


def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)


def readlock(pathname: bytes) -> bytes:
    try:
        return readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # no symlink in os
        pass
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
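
# Illustrative sketch (not part of the module API): pairing makelock()
# with readlock(). Where symlinks are available the lock payload lives
# in the link target; otherwise it is written to a plain file, which
# readlock() falls back to reading. The path below is hypothetical.
#
#   makelock(b'hostname:12345', b'.hg/wlock')
#   assert readlock(b'.hg/wlock') == b'hostname:12345'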


def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)


# File system features


def fscasesensitive(path: bytes) -> bool:
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
    if b == b2:
        return True  # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True
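
# Illustrative sketch: the probe lstat()s the path under a case-swapped
# final component and compares the results, so the path must already
# exist and its last component must contain at least one letter. The
# path below is hypothetical.
#
#   if not fscasesensitive(b'/repo/.hg'):
#       ...  # fold filenames before comparing them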


_re2_input = lambda x: x
# google-re2 will need to be told not to output errors on its own
_re2_options = None
try:
    import re2  # pytype: disable=import-error

    _re2 = None
except ImportError:
    _re2 = False


def has_re2():
    """return True if re2 is available, False otherwise"""
    if _re2 is None:
        _re._checkre2()
    return _re2


class _re:
    @staticmethod
    def _checkre2():
        global _re2
        global _re2_input
        global _re2_options
        if _re2 is not None:
            # we already have the answer
            return

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr
        try:
            quiet = re2.Options()
            quiet.log_errors = False
            _re2_options = quiet
        except AttributeError:
            pass

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                input_regex = _re2_input(pat)
                if _re2_options is not None:
                    compiled = re2.compile(input_regex, options=_re2_options)
                else:
                    compiled = re2.compile(input_regex)
                return compiled
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape


re = _re()
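
# Illustrative sketch: the module-level `re` object exposes a small
# stdlib-compatible surface that silently prefers google-re2 when it is
# installed and usable:
#
#   pat = re.compile(br'[a-z]+', remod.IGNORECASE)
#   assert pat.match(b'Hg') is not None
#   # incompatible flags or patterns fall back to the stdlib re module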

_fspathcache = {}


def fspath(name: bytes, root: bytes) -> bytes:
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps = seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk", which may
            # take place once for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)


def checknlink(testfile: bytes) -> bool:
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass


def endswithsep(path: bytes) -> bool:
    '''Check path ends with os.sep or os.altsep.'''
    return bool(  # help pytype
        path.endswith(pycompat.ossep)
        or pycompat.osaltsep
        and path.endswith(pycompat.osaltsep)
    )


def splitpath(path: bytes) -> List[bytes]:
    """Split path by os.sep.
    Note that this function does not use os.altsep because it is meant
    as a simple alternative to "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed."""
    return path.split(pycompat.ossep)


def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents as name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp


class filestat:
    """helps to exactly detect change of a file

    'stat' attribute is the result of 'os.stat()' if the specified 'path'
    exists. Otherwise, it is None. This spares callers a separate
    preparatory 'exists()' check.
    """

    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except FileNotFoundError:
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime  < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime  < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime  > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime  > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        the same time in sec (= S[n-1].ctime), and comparison of timestamps
        is ambiguous.

        The base idea to avoid such ambiguity is "advance mtime 1 sec, if
        the timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        a change hidden by a collision between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if the size of the file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except PermissionError:
            # utime() on the file created by another user causes EPERM,
            # if a process doesn't have appropriate privileges
            return False
        return True

    def __ne__(self, other):
        return not self == other
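
# Illustrative sketch: detecting a rewrite of a file whose timestamps
# alone are ambiguous, then nudging mtime forward. The path is
# hypothetical.
#
#   oldstat = filestat.frompath(b'cache/branch2')
#   ...  # the file is rewritten here
#   newstat = filestat.frompath(b'cache/branch2')
#   if newstat.isambig(oldstat):
#       newstat.avoidambig(b'cache/branch2', oldstat)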


class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.writelines = self._fp.writelines
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if hasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()
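
# Illustrative sketch: all writes land in a temporary sibling file and
# become visible only on close(); an exception discards them. The
# filename is hypothetical.
#
#   with atomictempfile(b'requires') as fp:
#       fp.write(b'store\n')
#   # readers never observe a half-written 'requires' file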


def tryrmdir(f):
    try:
        removedirs(f)
    except OSError as e:
        if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
            raise


def unlinkpath(
    f: bytes, ignoremissing: bool = False, rmdir: bool = True
) -> None:
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if rmdir:
        # try removing directories that might now be empty
        try:
            removedirs(os.path.dirname(f))
        except OSError:
            pass


def tryunlink(f: bytes) -> bool:
    """Attempt to remove a file, ignoring FileNotFoundError.

    Returns False in case the file did not exist, True otherwise
    """
    try:
        unlink(f)
        return True
    except FileNotFoundError:
        return False


def makedirs(
    name: bytes, mode: Optional[int] = None, notindexed: bool = False
) -> None:
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)


def readfile(path: bytes) -> bytes:
    with open(path, b'rb') as fp:
        return fp.read()


def writefile(path: bytes, text: bytes) -> None:
    with open(path, b'wb') as fp:
        fp.write(text)


def appendfile(path: bytes, text: bytes) -> None:
    with open(path, b'ab') as fp:
        fp.write(text)


class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)
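
# Illustrative sketch: reads spanning chunk boundaries are stitched
# together, and a partially consumed chunk is tracked via an offset
# instead of re-queueing a copy:
#
#   buf = chunkbuffer(iter([b'abc', b'defg']))
#   assert buf.read(2) == b'ab'
#   assert buf.read(5) == b'cdefg'
#   assert buf.read(1) == b''  # iterator ran dry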


def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s


class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
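
# Illustrative sketch: capping reads from a larger stream, e.g. to stop
# a consumer at a frame boundary:
#
#   fh = io.BytesIO(b'frame-payload-and-trailing-data')
#   capped = cappedreader(fh, 13)
#   assert capped.read() == b'frame-payload'
#   assert capped.read() == b''  # the cap is treated as EOF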


def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go


def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline


bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
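
# Illustrative: the first row whose threshold (multiplier * divisor) is
# reached decides the unit and precision, so:
#
#   bytecount(100) == b'100 bytes'
#   bytecount(4096) == b'4.00 KB'
#   bytecount(15 * (1 << 20)) == b'15.0 MB'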


class transformingwriter:
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))


# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')


def tolf(s: bytes) -> bytes:
    return _eolre.sub(b'\n', s)


def tocrlf(s: bytes) -> bytes:
    return _eolre.sub(b'\r\n', s)


def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)


if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
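
# Illustrative: _eolre collapses a run of CRs before each LF, while a
# lone CR (an old Macintosh line ending) is left untouched:
#
#   tolf(b'a\r\r\nb\n') == b'a\nb\n'
#   tocrlf(b'a\nb\n') == b'a\r\nb\r\n'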


# TODO: delete, since the workaround variant for Python 2 is no longer needed.
def iterfile(fp):
    return fp


def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line


def expandpath(path: bytes) -> bytes:
    return os.path.expanduser(os.path.expandvars(path))


def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
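
# Illustrative sketch: with escape_prefix=True a doubled prefix escapes
# itself, so callers can emit a literal prefix character:
#
#   interpolate(br'\$', {b'user': b'alice'}, b'$user pays $$5',
#               escape_prefix=True) == b'alice pays $5'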


timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)


@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution are
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)


@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


timedcm._nested = 0
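
# Illustrative sketch: timing a block and rendering the elapsed time via
# timecount():
#
#   with timedcm(b'parsing %s', b'manifest') as stats:
#       ...  # the timed work happens here
#   # bytes(stats) is now e.g. b'1.23 ms'; nested uses bump stats.level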


def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        stderr = procutil.stderr
        stderr.write(
            b'%s%s: %s\n'
            % (
                b' ' * time_stats.level * 2,
                pycompat.bytestr(func.__name__),
                time_stats,
            )
        )
        return result

    return wrapper


_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)


def sizetoint(s: bytes) -> int:
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[: -len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)


class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results
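
# Illustrative sketch: hooks run sorted by their source name, and the
# call returns every hook's result:
#
#   h = hooks()
#   h.add(b'b-ext', lambda v: v * 2)
#   h.add(b'a-ext', lambda v: v + 1)
#   assert h(3) == [4, 6]  # a-ext runs before b-ext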


def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not to be used in production code, but very convenient while developing.
    """
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
    ][-depth:]
    if entries:
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
3143
3152
3144
3153
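# A hypothetical sketch (not part of the original module) of the
# line=None mode, which yields raw (width, fileline, function) tuples
# for callers that do their own formatting.
def _demo_getstackframes():
    for fnmax, fnln, func in getstackframes(skip=1, line=None):
        procutil.stdout.write(b'%-*s %s\n' % (fnmax, fnln, func))
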
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not to be used in production code, but very convenient while developing.
    """
    if otherf:
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()

# convenient shortcut
dst = debugstacktrace

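# A hypothetical debugging sketch (not part of the original module):
# drop dst() into suspect code to see how it is reached, e.g.
#
#     dst(b'entering merge', depth=10)
#
# prints "entering merge at:" plus the ten innermost frames to stderr.
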
def safename(f, tag, ctx, others=None):
    """
    Generate a name that is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = b'%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = b'%s~%s~%s' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn

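# A minimal sketch with stand-in objects (hypothetical, not part of the
# original module). Membership is only tested with `in`, so a plain set
# can stand in for a real context here.
def _demo_safename():
    ctx = {b'a.txt', b'a.txt~orig'}
    assert safename(b'a.txt', b'orig', ctx) == b'a.txt~orig~1'
    assert safename(b'a.txt', b'orig', ctx, {b'a.txt~orig~1'}) == b'a.txt~orig~2'
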
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if fewer were available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s

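# A hypothetical illustration (not part of the original module) of
# readexactly's all-or-abort contract.
def _demo_readexactly():
    from io import BytesIO

    assert readexactly(BytesIO(b'abcd'), 2) == b'ab'
    try:
        readexactly(BytesIO(b'ab'), 4)
    except error.Abort:
        pass  # a short read raises rather than returning partial data
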
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the integer's value, least significant group first.

    >>> uvarintencode(0)
    b'\\x00'
    >>> uvarintencode(1)
    b'\\x01'
    >>> uvarintencode(127)
    b'\\x7f'
    >>> uvarintencode(1337)
    b'\\xb9\\n'
    >>> uvarintencode(65536)
    b'\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    bits = value & 0x7F
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return b''.join(bytes)

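# A worked example for the 1337 doctest above (added for illustration;
# not part of the original module):
#
#   1337 = 0b101_0011_1001
#   low 7 bits  -> 0b0111001 = 0x39; more groups follow: emit 0x39 | 0x80 = 0xb9
#   next 7 bits -> 0b0001010 = 0x0a; final group, emitted without the high bit
#
# hence b'\xb9\x0a', whose repr is b'\xb9\n'.
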
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        result |= (byte & 0x7F) << shift
        if not (byte & 0x80):
            return result
        shift += 7

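# A hypothetical round-trip check (not part of the original module)
# tying the two varint helpers together.
def _demo_uvarint_roundtrip():
    from io import BytesIO

    for value in (0, 1, 127, 128, 1337, 2 ** 32):
        assert uvarintdecodestream(BytesIO(uvarintencode(value))) == value
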
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc == 'C':
        try:
            try:
                locale.setlocale(locale.LC_CTYPE, '')
            except locale.Error:
                # The likely case is that the locale from the environment
                # variables is unknown.
                pass
            yield
        finally:
            locale.setlocale(locale.LC_CTYPE, oldloc)
    else:
        yield

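# A hypothetical usage sketch (not part of the original module): wrap
# calls into locale-sensitive C code so LC_CTYPE matches the user's
# environment, e.g.
#
#     with with_lc_ctype():
#         import curses
#         curses.setupterm()
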
def _estimatememory() -> Optional[int]:
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        # Structure, byref, sizeof and windll live in ctypes itself, not in
        # ctypes.wintypes, so import everything from ctypes.
        from ctypes import (  # pytype: disable=import-error
            Structure,
            byref,
            c_long as DWORD,
            c_ulonglong as DWORDLONG,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and macOS, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
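
# A hypothetical illustration (not part of the original module): callers
# can treat None as "unknown" and only act on a positive estimate.
def _demo_estimatememory():
    mem = _estimatememory()
    if mem is not None and mem < 2 * 2 ** 30:
        pass  # e.g. prefer a less memory-hungry strategy below ~2 GiB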