util: drop a duplicate import...
Matt Harbison
r49852:7ccf3dac default
@@ -1,3320 +1,3319 @@
# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""


import abc
import collections
import contextlib
import errno
import gc
import hashlib
import io
import itertools
import locale
import mmap
import os
import pickle  # provides util.pickle symbol
import re as remod
import shutil
import stat
import sys
import time
import traceback
import warnings

from .node import hex
from .thirdparty import attr
from .pycompat import (
    delattr,
    getattr,
    open,
    setattr,
)
-from .node import hex
from hgdemandimport import tracing
from . import (
    encoding,
    error,
    i18n,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    compression,
    hashutil,
    procutil,
    stringutil,
)

if pycompat.TYPE_CHECKING:
    from typing import (
        Iterator,
        List,
        Optional,
        Tuple,
    )


base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
httplib = pycompat.httplib
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = io.BytesIO
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

abspath = platform.abspath
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
get_password = platform.get_password
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
readlink = platform.readlink
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username


def setumask(val):
    # type: (int) -> None
    '''updates the umask. used by chg server'''
    if pycompat.iswindows:
        return
    os.umask(val)
    global umask
    platform.umask = umask = val & 0o777


# small compat layer
compengines = compression.compengines
SERVERROLE = compression.SERVERROLE
CLIENTROLE = compression.CLIENTROLE

try:
    recvfds = osutil.recvfds
except AttributeError:
    pass

# Python compatibility

_notset = object()


def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits
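
A quick illustration of what bitsfrom computes: an OR-fold of the integers
yielded by the container (the stat flags here are arbitrary examples).

    >>> bitsfrom([stat.S_IRUSR, stat.S_IWUSR, stat.S_IXUSR]) == 0o700
    True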


# python 2.6 still has deprecation warnings enabled by default. We do not want
# to display anything to standard users, so detect if we are running tests and
# only use python deprecation warnings in this case.
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was investigated.
    # However, module name set through PYTHONWARNINGS was exactly matched, so
    # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
    # makes the whole PYTHONWARNINGS thing useless for our usecase.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
if _dowarn:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )


def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += (
            b"\n(compatibility will be dropped after Mercurial-%s,"
            b" update your code.)"
        ) % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
        # on python 3 with chg, we will need to explicitly flush the output
        sys.stderr.flush()
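
For instance, a deprecated helper running under the test suite (with
HGEMITWARNINGS set) might announce itself as follows; the message and version
are made up for illustration.

    nouideprecwarn(
        b'foo() is deprecated, use bar() instead', b'6.2', stacklevel=2
    )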


DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS


class digester:
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None


class digestchecker:
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (k, v, self._digester[k])
                )
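
A minimal sketch of the intended use of digestchecker: verify a stream against
a known size and digest. The file name and expected values here are
placeholders; the digest value must be a hex string as bytes, matching what
digester.__getitem__ returns.

    fh = open(b'bundle.hg', 'rb')
    checked = digestchecker(fh, expectedsize, {b'sha1': expectedsha1hex})
    while checked.read(4096):
        pass
    checked.validate()  # raises error.Abort on any size or digest mismatch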


try:
    buffer = buffer  # pytype: disable=name-error
except NameError:

    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset : offset + length]
        return memoryview(sliceable)[offset:]
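
On Python 3, where the buffer builtin no longer exists, this fallback hands
back a memoryview slice, i.e. a zero-copy window over the original bytes:

    assert bytes(buffer(b'abcdef', 2, 3)) == b'cde'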


_chunksize = 4096


class bufferedinputpipe:
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapses it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
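
A sketch of the polling pattern the docstring describes, assuming proc is a
subprocess.Popen whose stdout is an unbuffered pipe: consult hasbuffer before
blocking in select(), since data already sitting in our buffer will never wake
the poll.

    import select

    pipe = bufferedinputpipe(proc.stdout)
    while True:
        if not pipe.hasbuffer:
            # only block in select() when our own buffer is empty
            select.select([pipe.fileno()], [], [])
        line = pipe.readline()
        if not line:
            break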


def mmapread(fp, size=None):
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    elif size is None:
        size = 0
    fd = getattr(fp, 'fileno', lambda: fp)()
    try:
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
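
A small usage sketch (the path is illustrative). Slicing the returned mmap
yields bytes, so callers can treat the result much like a read() result:

    with open(b'data.bin', 'rb') as fp:
        buf = mmapread(fp)  # maps the whole file; b'' if the file is empty
        header = buf[:16]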


class fileobjectproxy:
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, '_orig', fh)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        ours = {
            '_observer',
            # IOBase
            'close',
            # closed is a property
            'fileno',
            'flush',
            'isatty',
            'readable',
            'readline',
            'readlines',
            'seek',
            'seekable',
            'tell',
            'truncate',
            'writable',
            'writelines',
            # RawIOBase
            'read',
            'readall',
            'readinto',
            'write',
            # BufferedIOBase
            # raw is a property
            'detach',
            # read defined above
            'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, '_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'close', *args, **kwargs
        )

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'fileno', *args, **kwargs
        )

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'flush', *args, **kwargs
        )

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'isatty', *args, **kwargs
        )

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readable', *args, **kwargs
        )

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readline', *args, **kwargs
        )

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readlines', *args, **kwargs
        )

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seek', *args, **kwargs
        )

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seekable', *args, **kwargs
        )

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'tell', *args, **kwargs
        )

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'truncate', *args, **kwargs
        )

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writable', *args, **kwargs
        )

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writelines', *args, **kwargs
        )

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read', *args, **kwargs
        )

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readall', *args, **kwargs
        )

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readinto', *args, **kwargs
        )

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'write', *args, **kwargs
        )

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'detach', *args, **kwargs
        )

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read1', *args, **kwargs
        )


class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _fillbuffer(self):
        res = super(observedbufferedinputpipe, self)._fillbuffer()

        fn = getattr(self._input._observer, 'osread', None)
        if fn:
            fn(res, _chunksize)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, 'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, 'bufferedreadline', None)
        if fn:
            fn(res)

        return res


PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}


class socketproxy:
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )


class baseproxyobserver:
    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(b':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(line))
            )
        self.fh.flush()


class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        data = dest[0:res] if res is not None else b''

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(data, memoryview):
            data = data.tobytes()

        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)


def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return fileobjectproxy(fh, observer)
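
A sketch of wiring this up in a test, capturing the trace in an in-memory log
handle (io is already imported at the top of this module):

    logh = io.BytesIO()                 # receives the trace output
    fh = io.BytesIO(b'hello\nworld\n')  # the object being observed
    proxied = makeloggingfileobject(logh, fh, b'obs', logdata=True)
    proxied.readline()
    # logh now holds a trace along the lines of:
    #   obs> readline() -> 6:
    #   obs> hello\n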
971
970
972
971
973 class socketobserver(baseproxyobserver):
972 class socketobserver(baseproxyobserver):
974 """Logs socket activity."""
973 """Logs socket activity."""
975
974
976 def __init__(
975 def __init__(
977 self,
976 self,
978 fh,
977 fh,
979 name,
978 name,
980 reads=True,
979 reads=True,
981 writes=True,
980 writes=True,
982 states=True,
981 states=True,
983 logdata=False,
982 logdata=False,
984 logdataapis=True,
983 logdataapis=True,
985 ):
984 ):
986 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
985 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
987 self.reads = reads
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )


def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)


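# Example (hypothetical usage sketch): mirror every read and write on a
# connected socket into a log file. `sock` and `logfh` are made-up names;
# socketproxy/socketobserver are defined earlier in this module.
#
#   import socket
#   sock = socket.create_connection(('localhost', 8000))
#   with open('/tmp/sock.log', 'wb') as logfh:
#       proxied = makeloggingsocket(logfh, sock, b'client')
#       proxied.sendall(b'ping')   # observer logs "client> sendall(4, 0)"
#       proxied.recv(4096)         # observer logs the received payload

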
def version():
    """Return version information if available."""
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'


def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)


def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f


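# Example (hypothetical usage sketch): memoizing a pure single-argument
# function with cachefunc. Keyword arguments are not supported, and the
# cache never evicts, so this suits small, bounded argument spaces.
#
#   @cachefunc
#   def fib(n):
#       return n if n < 2 else fib(n - 1) + fib(n - 2)
#
#   fib(30)  # each distinct n is computed once, then served from `cache`

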
class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self


class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v


class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """


class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """


class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()


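# A minimal sketch of a transactional subclass (hypothetical, for
# illustration): close() runs only when the body succeeds, release()
# always runs and aborts anything not yet closed.
#
#   class demotransaction(transactional):
#       def close(self):
#           print('committed')
#
#       def release(self):
#           print('cleaned up')
#
#   with demotransaction():
#       do_work()  # on an exception, only release() runs

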
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()


@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    yield enter_result


class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0


class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new node. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                self.totalcost -= node.cost
                del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev


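# Example (hypothetical usage sketch): an LRU cache bounded both by entry
# count and by total cost, here using value length as a byte-size estimate.
#
#   d = lrucachedict(100, maxcost=1024 * 1024)
#   d.insert(b'key', b'value', cost=len(b'value'))
#   d[b'key']       # a hit; the entry becomes the new head
#   d.peek(b'key')  # a read that leaves the LRU order untouched
#
# When totalcost exceeds maxcost, _enforcecostlimit() evicts oldest entries
# down to ~75% of maxcost so back-to-back inserts rarely re-trigger pruning.

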
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f


class propertycache:
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value


def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]


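# Example (hypothetical usage sketch): cache an expensive attribute per
# instance; `slowquery` is a made-up helper.
#
#   class info:
#       @propertycache
#       def heavy(self):
#           return slowquery(self)  # runs only on first attribute access
#
#   obj = info()
#   obj.heavy   # computed, then stored directly in obj.__dict__
#   obj.heavy   # plain attribute lookup; __get__ is bypassed entirely
#   clearcachedproperty(obj, b'heavy')  # next access recomputes

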
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield b''.join(buf)


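# Example (hypothetical usage sketch): coalesce many tiny chunks into
# steadily growing buffers, e.g. before handing data to a compressor.
#
#   def tiny_chunks():
#       for _ in range(10000):
#           yield b'x' * 17
#
#   for buf in increasingchunks(tiny_chunks()):
#       pass  # buffers start around 1 KiB and double toward 64 KiB

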
def always(fn):
    return True


def never(fn):
    return False


def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()

    return wrapper


if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x


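# Example (hypothetical usage sketch): keep the collector out of the way
# while building a huge dict, restoring its prior state afterwards.
#
#   @nogc
#   def buildindex(entries):
#       return {e: i for i, e in enumerate(entries)}
#
# On PyPy the decorator is the identity function, per the branch above.

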
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'


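# Worked example (hypothetical values): with root=b'/repo', n1=b'foo/bar'
# and n2=b'baz/quux', there is no common prefix, so we climb two levels
# and descend again:
#
#   pathto(b'/repo', b'foo/bar', b'baz/quux')  # -> b'../../baz/quux' on POSIX

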
def checksignature(func, depth=1):
    '''wrap a function with code to check for calling errors'''

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
                raise error.SignatureError
            raise

    return check


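# Example (hypothetical usage sketch): surface argument-count mistakes as
# SignatureError while letting deeper TypeErrors propagate unchanged.
#
#   def greet(name):
#       return b'hello ' + name
#
#   checked = checksignature(greet)
#   checked(b'world')  # fine
#   checked()          # TypeError raised at call depth 1 -> SignatureError

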
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}


def copyfile(
    src,
    dest,
    hardlink=False,
    copystat=False,
    checkambig=False,
    nb_bytes=None,
    no_hardlink_cb=None,
    check_fs_hardlink=True,
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink and check_fs_hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            if no_hardlink_cb is not None:
                no_hardlink_cb()
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError) as exc:
            if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
                no_hardlink_cb()
            # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (
                            oldstat.stat[stat.ST_MTIME] + 1
                        ) & 0x7FFFFFFF
                        os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))


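# Example (hypothetical usage sketch): request a hardlink but degrade
# gracefully when the destination filesystem is not whitelisted; the paths
# and callback are made up.
#
#   def note_fallback():
#       print('hardlink not trusted here, copying instead')
#
#   copyfile(b'/src/file', b'/dst/file', hardlink=True,
#            no_hardlink_cb=note_fallback)
#
# Note that nb_bytes cannot be combined with hardlink=True; the code above
# raises error.ProgrammingError for that combination.

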
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    def settopic():
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exists?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num


_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
_winreservedchars = b':*?"<>|'


def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )


timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time


def makelock(info, pathname):
    """Create a lock file atomically if possible

    This may leave a stale lock file if symlink isn't supported and signal
    interrupt is enabled.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
    except AttributeError:  # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)


def readlock(pathname):
    # type: (bytes) -> bytes
    try:
        return readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # no symlink in os
        pass
    with posixfile(pathname, b'rb') as fp:
        return fp.read()


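# Illustrative usage sketch (the lock path and payload below are assumed
# examples, not Mercurial conventions): makelock() prefers an atomic
# symlink and falls back to an O_EXCL file, and readlock() mirrors that by
# trying readlink() before a plain read.
#
#     makelock(b'somehost:12345', b'.hg/wlock')
#     assert readlock(b'.hg/wlock') == b'somehost:12345'

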
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        return os.fstat(fp.fileno())
    except AttributeError:
        return os.stat(fp.name)


# File system features


def fscasesensitive(path):
    # type: (bytes) -> bool
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True  # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
        if s2 == s1:
            return False
        return True
    except OSError:
        return True


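# Illustrative sketch (the path is an assumed example): the check works by
# re-stat()ing the same entry under a case-swapped name and comparing the
# two stat results.
#
#     if not fscasesensitive(b'/repo/.hg'):
#         pass  # e.g. enable extra case-collision checking

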
_re2_input = lambda x: x
try:
    import re2  # pytype: disable=import-error

    _re2 = None
except ImportError:
    _re2 = False


class _re:
    def _checkre2(self):
        global _re2
        global _re2_input

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accepts bytes
            # the `fb-re2` project provides a re2 module that accepts sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(_re2_input(pat))
            except re2.error:
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape


re = _re()

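# Illustrative sketch: the module-level `re` singleton transparently falls
# back to the stdlib engine (imported above as `remod`) when re2 is
# unavailable or rejects the pattern, so callers never need to know which
# engine matched.
#
#     pat = re.compile(br'[0-9a-f]+', remod.IGNORECASE)
#     assert pat.match(b'deadbeef')
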
_fspathcache = {}


def fspath(name, root):
    # type: (bytes, bytes) -> bytes
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)


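# Sketch (assumed names and on-disk case): both arguments must already be
# normcase-ed; the return value restores the spelling stored on disk.
#
#     fspath(b'readme.txt', b'/repo')  # -> b'README.txt' on such a system

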
def checknlink(testfile):
    # type: (bytes) -> bool
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fp is not None:
            fp.close()
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass


def endswithsep(path):
    # type: (bytes) -> bool
    '''Check whether path ends with os.sep or os.altsep.'''
    return bool(  # help pytype
        path.endswith(pycompat.ossep)
        or pycompat.osaltsep
        and path.endswith(pycompat.osaltsep)
    )


def splitpath(path):
    # type: (bytes) -> List[bytes]
    """Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative to simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed."""
    return path.split(pycompat.ossep)


def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents as the file at name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp


class filestat:
    """helper to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This avoids a preparatory
    'exists()' check on the client side of this class.
    """

    def __init__(self, stat):
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        stat = os.fstat(fp.fileno())
        return cls(stat)

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (
                self.stat.st_size == old.stat.st_size
                and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
                and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
            )
        except AttributeError:
            pass
        try:
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more within
        the same second (= S[n-1].ctime), and comparison of timestamps
        is ambiguous.

        The basic idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        changes due to collisions between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
        except AttributeError:
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        return not self == other


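# Sketch (the b'data' path is an assumed example): detecting whether a
# file changed between two observations, including the mtime-advancing
# trick described in isambig()/avoidambig() above.
#
#     old = filestat.frompath(b'data')
#     # ... something may rewrite b'data' here ...
#     new = filestat.frompath(b'data')
#     if new == old:
#         pass  # unchanged: size, ctime and mtime all match
#     elif new.isambig(old):
#         new.avoidambig(b'data', old)  # bump mtime by one second

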
class atomictempfile:
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by a lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        if exctype is not None:
            self.discard()
        else:
            self.close()


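# Minimal usage sketch (b'config' is an assumed path): the context-manager
# form maps exceptions to discard() and normal exit to close(), so readers
# never observe a half-written file.
#
#     with atomictempfile(b'config', b'wb') as fp:
#         fp.write(b'[ui]\nusername = example\n')
#     # the temp copy has now been renamed over b'config'

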
def unlinkpath(f, ignoremissing=False, rmdir=True):
    # type: (bytes, bool, bool) -> None
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if rmdir:
        # try removing directories that might now be empty
        try:
            removedirs(os.path.dirname(f))
        except OSError:
            pass


def tryunlink(f):
    # type: (bytes) -> None
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise


def makedirs(name, mode=None, notindexed=False):
    # type: (bytes, Optional[int], bool) -> None
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)


def readfile(path):
    # type: (bytes) -> bytes
    with open(path, b'rb') as fp:
        return fp.read()


def writefile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'wb') as fp:
        fp.write(text)


def appendfile(path, text):
    # type: (bytes, bytes) -> None
    with open(path, b'ab') as fp:
        fp.write(text)


class chunkbuffer:
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)


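# Sketch: fixed-size reads over irregular input chunks. Note that read()
# with no argument drains only the underlying iterator, so mixing it with
# sized reads that leave queued data behind is best avoided.
#
#     cb = chunkbuffer(iter([b'abc', b'defgh']))
#     assert cb.read(4) == b'abcd'
#     assert cb.read(4) == b'efgh'  # runs dry: fewer bytes than requested

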
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        if limit is None:
            nbytes = size
        else:
            nbytes = min(limit, size)
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s


class cappedreader:
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        if not self._left:
            return b''

        if n < 0:
            n = self._left

        data = self._fh.read(min(n, self._left))
        self._left -= len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)


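# Sketch (io.BytesIO stands in for any file object): reads beyond the cap
# report EOF even though the underlying stream holds more data.
#
#     fh = io.BytesIO(b'0123456789')
#     reader = cappedreader(fh, 4)
#     assert reader.read() == b'0123'
#     assert reader.read() == b''

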
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        for multiplier, divisor, format in unittable:
            if abs(count) >= divisor * multiplier:
                return format % (count / float(divisor))
        return unittable[-1][2] % count

    return go


def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if toline - fromline < 0:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    return fromline - 1, toline


bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)


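# Sketch: the table is scanned top-down, so larger magnitudes win and the
# precision widens as the value shrinks within a unit.
#
#     bytecount(150 * (1 << 20))  # -> b'150 MB'
#     bytecount(15 * (1 << 20))   # -> b'15.0 MB'
#     bytecount(512)              # -> b'512 bytes'

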
class transformingwriter:
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        return self._fp.write(self._encode(data))


# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')


def tolf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\n', s)


def tocrlf(s):
    # type: (bytes) -> bytes
    return _eolre.sub(b'\r\n', s)


def _crlfwriter(fp):
    return transformingwriter(fp, tocrlf)


if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity


# TODO delete since workaround variant for Python 2 no longer needed.
def iterfile(fp):
    return fp


def iterlines(iterator):
    # type: (Iterator[bytes]) -> Iterator[bytes]
    for chunk in iterator:
        for line in chunk.splitlines():
            yield line


def expandpath(path):
    # type: (bytes) -> bytes
    return os.path.expanduser(os.path.expandvars(path))


def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows a doubled prefix to be
    used as its escape.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)


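# Sketch (assumed mapping and input): expanding %-style placeholders with
# a doubled %% escaping to a literal %.
#
#     interpolate(br'\%', {b'f': b'file.txt'}, b'cp %f %%f',
#                 escape_prefix=True)
#     # -> b'cp file.txt %f'

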
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)


@attr.s
class timedcmstats:
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)


@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering, a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


timedcm._nested = 0


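# Sketch: timing a block of work; elapsed is only filled in once the
# context exits, after which bytes(stats) renders via timecount().
#
#     with timedcm(b'scanning %s', b'store') as stats:
#         pass  # ... work to measure ...
#     # bytes(stats) -> e.g. b'1.23 ms'

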
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
            result = func(*args, **kwargs)
        stderr = procutil.stderr
        stderr.write(
            b'%s%s: %s\n'
            % (
                b' ' * time_stats.level * 2,
                pycompat.bytestr(func.__name__),
                time_stats,
            )
        )
        return result

    return wrapper


_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)


def sizetoint(s):
    # type: (bytes) -> int
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    t = s.strip().lower()
    try:
        for k, u in _sizeunits:
            if t.endswith(k):
                return int(float(t[: -len(k)]) * u)
        return int(t)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)


class hooks:
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda x: x[0])
        results = []
        for source, hook in self._hooks:
            results.append(hook(*args))
        return results


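# Sketch (assumed source names): hooks run sorted by source name, not by
# registration order, and all results are collected.
#
#     h = hooks()
#     h.add(b'ext-b', lambda x: x + 1)
#     h.add(b'ext-a', lambda x: x * 2)
#     assert h(3) == [6, 4]  # ext-a runs first

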
3090 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3089 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3091 """Yields lines for a nicely formatted stacktrace.
3090 """Yields lines for a nicely formatted stacktrace.
3092 Skips the 'skip' last entries, then return the last 'depth' entries.
3091 Skips the 'skip' last entries, then return the last 'depth' entries.
3093 Each file+linenumber is formatted according to fileline.
3092 Each file+linenumber is formatted according to fileline.
3094 Each line is formatted according to line.
3093 Each line is formatted according to line.
3095 If line is None, it yields:
3094 If line is None, it yields:
3096 length of longest filepath+line number,
3095 length of longest filepath+line number,
3097 filepath+linenumber,
3096 filepath+linenumber,
3098 function
3097 function
3099
3098
3100 Not be used in production code but very convenient while developing.
3099 Not be used in production code but very convenient while developing.
3101 """
3100 """
3102 entries = [
3101 entries = [
3103 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3102 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3104 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3103 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3105 ][-depth:]
3104 ][-depth:]
3106 if entries:
3105 if entries:
3107 fnmax = max(len(entry[0]) for entry in entries)
3106 fnmax = max(len(entry[0]) for entry in entries)
3108 for fnln, func in entries:
3107 for fnln, func in entries:
3109 if line is None:
3108 if line is None:
3110 yield (fnmax, fnln, func)
3109 yield (fnmax, fnln, func)
3111 else:
3110 else:
3112 yield line % (fnmax, fnln, func)
3111 yield line % (fnmax, fnln, func)
3113
3112
3114
3113
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not to be used in production code, but very convenient while developing.
    """
    if otherf:
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()


# convenient shortcut
dst = debugstacktrace


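# Illustration only (not in the original source): drop a dst() call into a
# code path under investigation; skip/depth trim the output to the frames
# that matter.
#
#     def suspect():
#         dst(b'suspect() reached', skip=0, depth=5)

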
def safename(f, tag, ctx, others=None):
    """
    Generate a name that is safe to rename f to in the given context.

    f: filename to rename
    tag: a string tag that will be included in the new name
    ctx: a context, in which the new name must not exist
    others: a set of other filenames that the new name must not be in

    Returns a file name of the form oldname~tag[~number] which does not exist
    in the provided context and is not in the set of other names.
    """
    if others is None:
        others = set()

    fn = b'%s~%s' % (f, tag)
    if fn not in ctx and fn not in others:
        return fn
    for n in itertools.count(1):
        fn = b'%s~%s~%s' % (f, tag, n)
        if fn not in ctx and fn not in others:
            return fn


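# Illustration only, with hypothetical values: if b'foo~orig' is already
# taken in the context (or listed in others), numbered variants are tried
# until a free name is found.
#
#     safename(b'foo', b'orig', ctx)                        # b'foo~orig'
#     safename(b'foo', b'orig', ctx, others={b'foo~orig'})  # b'foo~orig~1'

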
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if fewer were available'''
    s = stream.read(n)
    if len(s) < n:
        raise error.Abort(
            _(b"stream ended unexpectedly (got %d bytes, expected %d)")
            % (len(s), n)
        )
    return s


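# Illustration only (not in the original source): read a fixed-size record
# header; a short read raises error.Abort instead of returning fewer bytes.
#
#     header = readexactly(fh, 4)  # fh is any object with a .read(N) method

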
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the value in groups of 7 bits, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    bits = value & 0x7F
    value >>= 7
    bytes = []
    while value:
        bytes.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    bytes.append(pycompat.bytechr(bits))

    return b''.join(bytes)


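# Worked example (added for illustration): 1337 is 0b10100111001. The low
# seven bits (0b0111001 = 0x39) are emitted first with the continuation bit
# set (0x80 | 0x39 = 0xb9); the remaining bits (0b1010 = 0x0a) form the
# final byte, giving b'\xb9\n' as in the doctest above.
#
#     assert uvarintencode(1337) == b'\xb9\n'

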
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    shift = 0
    while True:
        byte = ord(readexactly(fh, 1))
        result |= (byte & 0x7F) << shift
        if not (byte & 0x80):
            return result
        shift += 7


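# Round-trip sketch (added for illustration): every value produced by
# uvarintencode() decodes back to itself.
#
#     from io import BytesIO
#     for v in (0, 1, 127, 128, 1337, 65536):
#         assert uvarintdecodestream(BytesIO(uvarintencode(v))) == v

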
# Passing the '' locale means that the locale should be set according to the
# user settings (environment variables).
# Python sometimes avoids setting the global locale settings. When interfacing
# with C code (e.g. the curses module or the Subversion bindings), the global
# locale settings must be initialized correctly. Python 2 does not initialize
# the global locale settings on interpreter startup. Python 3 sometimes
# initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
# explicitly initialize it to get consistent behavior if it's not already
# initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
# LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
# if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc == 'C':
        try:
            try:
                locale.setlocale(locale.LC_CTYPE, '')
            except locale.Error:
                # The likely case is that the locale from the environment
                # variables is unknown.
                pass
            yield
        finally:
            locale.setlocale(locale.LC_CTYPE, oldloc)
    else:
        yield


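# Usage sketch (added for illustration; the curses entry point is
# hypothetical): wrap locale-sensitive C code so LC_CTYPE is initialized
# from the environment for its duration and restored afterwards.
#
#     with with_lc_ctype():
#         curses.wrapper(main)

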
def _estimatememory():
    # type: () -> Optional[int]
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG

        # Structure, byref, sizeof and windll live in the ctypes module
        # itself; ctypes.wintypes does not re-export them on Python 3.
        from ctypes import (  # pytype: disable=module-attr
            Structure,
            byref,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and macOS, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
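# Usage sketch (added for illustration; the ui object is hypothetical):
# callers must handle None on platforms where no estimate is available.
#
#     mem = _estimatememory()
#     if mem is not None and mem < 2 * (2 ** 30):
#         ui.debug(b'less than 2GB of memory available\n')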