##// END OF EJS Templates
copyfiles: deal with existing file when hardlinking...
marmoute -
r48210:9ea52521 default
parent child Browse files
Show More
@@ -1,3394 +1,3396
1 # util.py - Mercurial utility functions and platform specific implementations
1 # util.py - Mercurial utility functions and platform specific implementations
2 #
2 #
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 #
6 #
7 # This software may be used and distributed according to the terms of the
7 # This software may be used and distributed according to the terms of the
8 # GNU General Public License version 2 or any later version.
8 # GNU General Public License version 2 or any later version.
9
9
10 """Mercurial utility functions and platform specific implementations.
10 """Mercurial utility functions and platform specific implementations.
11
11
12 This contains helper routines that are independent of the SCM core and
12 This contains helper routines that are independent of the SCM core and
13 hide platform-specific details from the core.
13 hide platform-specific details from the core.
14 """
14 """
15
15
16 from __future__ import absolute_import, print_function
16 from __future__ import absolute_import, print_function
17
17
18 import abc
18 import abc
19 import collections
19 import collections
20 import contextlib
20 import contextlib
21 import errno
21 import errno
22 import gc
22 import gc
23 import hashlib
23 import hashlib
24 import itertools
24 import itertools
25 import locale
25 import locale
26 import mmap
26 import mmap
27 import os
27 import os
28 import platform as pyplatform
28 import platform as pyplatform
29 import re as remod
29 import re as remod
30 import shutil
30 import shutil
31 import stat
31 import stat
32 import sys
32 import sys
33 import time
33 import time
34 import traceback
34 import traceback
35 import warnings
35 import warnings
36
36
37 from .node import hex
37 from .node import hex
38 from .thirdparty import attr
38 from .thirdparty import attr
39 from .pycompat import (
39 from .pycompat import (
40 delattr,
40 delattr,
41 getattr,
41 getattr,
42 open,
42 open,
43 setattr,
43 setattr,
44 )
44 )
45 from .node import hex
45 from .node import hex
46 from hgdemandimport import tracing
46 from hgdemandimport import tracing
47 from . import (
47 from . import (
48 encoding,
48 encoding,
49 error,
49 error,
50 i18n,
50 i18n,
51 policy,
51 policy,
52 pycompat,
52 pycompat,
53 urllibcompat,
53 urllibcompat,
54 )
54 )
55 from .utils import (
55 from .utils import (
56 compression,
56 compression,
57 hashutil,
57 hashutil,
58 procutil,
58 procutil,
59 stringutil,
59 stringutil,
60 urlutil,
60 urlutil,
61 )
61 )
62
62
63 if pycompat.TYPE_CHECKING:
63 if pycompat.TYPE_CHECKING:
64 from typing import (
64 from typing import (
65 Iterator,
65 Iterator,
66 List,
66 List,
67 Optional,
67 Optional,
68 Tuple,
68 Tuple,
69 )
69 )
70
70
71
71
72 base85 = policy.importmod('base85')
72 base85 = policy.importmod('base85')
73 osutil = policy.importmod('osutil')
73 osutil = policy.importmod('osutil')
74
74
75 b85decode = base85.b85decode
75 b85decode = base85.b85decode
76 b85encode = base85.b85encode
76 b85encode = base85.b85encode
77
77
78 cookielib = pycompat.cookielib
78 cookielib = pycompat.cookielib
79 httplib = pycompat.httplib
79 httplib = pycompat.httplib
80 pickle = pycompat.pickle
80 pickle = pycompat.pickle
81 safehasattr = pycompat.safehasattr
81 safehasattr = pycompat.safehasattr
82 socketserver = pycompat.socketserver
82 socketserver = pycompat.socketserver
83 bytesio = pycompat.bytesio
83 bytesio = pycompat.bytesio
84 # TODO deprecate stringio name, as it is a lie on Python 3.
84 # TODO deprecate stringio name, as it is a lie on Python 3.
85 stringio = bytesio
85 stringio = bytesio
86 xmlrpclib = pycompat.xmlrpclib
86 xmlrpclib = pycompat.xmlrpclib
87
87
88 httpserver = urllibcompat.httpserver
88 httpserver = urllibcompat.httpserver
89 urlerr = urllibcompat.urlerr
89 urlerr = urllibcompat.urlerr
90 urlreq = urllibcompat.urlreq
90 urlreq = urllibcompat.urlreq
91
91
92 # workaround for win32mbcs
92 # workaround for win32mbcs
93 _filenamebytestr = pycompat.bytestr
93 _filenamebytestr = pycompat.bytestr
94
94
95 if pycompat.iswindows:
95 if pycompat.iswindows:
96 from . import windows as platform
96 from . import windows as platform
97 else:
97 else:
98 from . import posix as platform
98 from . import posix as platform
99
99
100 _ = i18n._
100 _ = i18n._
101
101
102 bindunixsocket = platform.bindunixsocket
102 bindunixsocket = platform.bindunixsocket
103 cachestat = platform.cachestat
103 cachestat = platform.cachestat
104 checkexec = platform.checkexec
104 checkexec = platform.checkexec
105 checklink = platform.checklink
105 checklink = platform.checklink
106 copymode = platform.copymode
106 copymode = platform.copymode
107 expandglobs = platform.expandglobs
107 expandglobs = platform.expandglobs
108 getfsmountpoint = platform.getfsmountpoint
108 getfsmountpoint = platform.getfsmountpoint
109 getfstype = platform.getfstype
109 getfstype = platform.getfstype
110 get_password = platform.get_password
110 get_password = platform.get_password
111 groupmembers = platform.groupmembers
111 groupmembers = platform.groupmembers
112 groupname = platform.groupname
112 groupname = platform.groupname
113 isexec = platform.isexec
113 isexec = platform.isexec
114 isowner = platform.isowner
114 isowner = platform.isowner
115 listdir = osutil.listdir
115 listdir = osutil.listdir
116 localpath = platform.localpath
116 localpath = platform.localpath
117 lookupreg = platform.lookupreg
117 lookupreg = platform.lookupreg
118 makedir = platform.makedir
118 makedir = platform.makedir
119 nlinks = platform.nlinks
119 nlinks = platform.nlinks
120 normpath = platform.normpath
120 normpath = platform.normpath
121 normcase = platform.normcase
121 normcase = platform.normcase
122 normcasespec = platform.normcasespec
122 normcasespec = platform.normcasespec
123 normcasefallback = platform.normcasefallback
123 normcasefallback = platform.normcasefallback
124 openhardlinks = platform.openhardlinks
124 openhardlinks = platform.openhardlinks
125 oslink = platform.oslink
125 oslink = platform.oslink
126 parsepatchoutput = platform.parsepatchoutput
126 parsepatchoutput = platform.parsepatchoutput
127 pconvert = platform.pconvert
127 pconvert = platform.pconvert
128 poll = platform.poll
128 poll = platform.poll
129 posixfile = platform.posixfile
129 posixfile = platform.posixfile
130 readlink = platform.readlink
130 readlink = platform.readlink
131 rename = platform.rename
131 rename = platform.rename
132 removedirs = platform.removedirs
132 removedirs = platform.removedirs
133 samedevice = platform.samedevice
133 samedevice = platform.samedevice
134 samefile = platform.samefile
134 samefile = platform.samefile
135 samestat = platform.samestat
135 samestat = platform.samestat
136 setflags = platform.setflags
136 setflags = platform.setflags
137 split = platform.split
137 split = platform.split
138 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
139 statisexec = platform.statisexec
139 statisexec = platform.statisexec
140 statislink = platform.statislink
140 statislink = platform.statislink
141 umask = platform.umask
141 umask = platform.umask
142 unlink = platform.unlink
142 unlink = platform.unlink
143 username = platform.username
143 username = platform.username
144
144
145
145
def setumask(val):
    # type: (int) -> None
    """Change the process umask to ``val`` (helper for the chg server).

    Does nothing on Windows, where umask is not meaningful.  Elsewhere the
    cached module-level and platform ``umask`` values are refreshed so code
    reading them keeps seeing the active mask.
    """
    if pycompat.iswindows:
        return
    os.umask(val)
    global umask
    umask = val & 0o777
    platform.umask = umask
154
154
155
155
156 # small compat layer
156 # small compat layer
157 compengines = compression.compengines
157 compengines = compression.compengines
158 SERVERROLE = compression.SERVERROLE
158 SERVERROLE = compression.SERVERROLE
159 CLIENTROLE = compression.CLIENTROLE
159 CLIENTROLE = compression.CLIENTROLE
160
160
# recvfds is only provided by the C implementation of osutil; on builds
# without it (e.g. pure-Python or unsupported platforms) the name is simply
# left undefined, exactly as the historical try/except did.
if safehasattr(osutil, 'recvfds'):
    recvfds = osutil.recvfds
165
165
166 # Python compatibility
166 # Python compatibility
167
167
168 _notset = object()
168 _notset = object()
169
169
170
170
def bitsfrom(container):
    """Return the bitwise OR of every value in *container*.

    An empty container yields 0.
    """
    combined = 0
    for flag in container:
        combined |= flag
    return combined
176
176
177
177
# Python 2.6 still ships with deprecation warnings enabled by default.  We
# do not want to show anything to regular users, so the native warnings are
# only switched on when the test suite requests it via HGEMITWARNINGS.
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # Setting PYTHONWARNINGS in the test runner was investigated, but the
    # module name given there is matched exactly, so 'mercurial' would not
    # cover e.g. 'mercurial.scmutil'.  That makes the whole PYTHONWARNINGS
    # approach useless for our use case.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
    if pycompat.ispy3:
        # silence warning emitted by passing user string to re.sub()
        warnings.filterwarnings(
            'ignore', 'bad escape', DeprecationWarning, 'mercurial'
        )
        warnings.filterwarnings(
            'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
        )
        # TODO: reinvent imp.is_frozen()
        warnings.filterwarnings(
            'ignore',
            'the imp module is deprecated',
            DeprecationWarning,
            'mercurial',
        )
207
207
208
208
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a native Python DeprecationWarning (test runs only).

    Outside of the test suite this is a no-op; prefer 'ui.deprecwarn'
    whenever a ui object is available.
    """
    if not _dowarn:
        return
    msg += (
        b"\n(compatibility will be dropped after Mercurial-%s,"
        b" update your code.)"
    ) % version
    warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
    # on python 3 with chg, we will need to explicitly flush the output
    sys.stderr.flush()
222
222
223
223
# Mapping of supported digest names to their hash constructors.
DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

# Sanity check: every entry in the preference list must have a constructor
# registered above.  NOTE(review): the loop variable ``k`` deliberately leaks
# into module scope; digester.__getitem__ below (buggily) references it.
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
234
234
235
235
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed ``data`` to every tracked digest"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest for ``key`` (e.g. b'sha1')"""
        if key not in DIGESTS:
            # Report the digest that was actually requested.  This used to
            # interpolate the stale module-level loop variable ``k``, which
            # named an arbitrary digest instead of the offending one.
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
282
282
283
283
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the digester as we go"""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """abort unless the observed size and every digest match"""
        if self._got != self._size:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (name, expected, actual)
                )
319
319
320
320
try:
    buffer = buffer  # pytype: disable=name-error
except NameError:
    # Python 3 has no builtin buffer(); emulate it with zero-copy
    # memoryview slices.

    def buffer(sliceable, offset=0, length=None):
        view = memoryview(sliceable)
        if length is None:
            return view[offset:]
        return view[offset : offset + length]
329
329
330
330
331 _chunksize = 4096
331 _chunksize = 4096
332
332
333
333
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        # the raw, unbuffered file object being wrapped
        self._input = input
        # chunks read from the fd but not yet handed out, oldest first
        self._buffer = []
        # set once os.read() returns b'' (end of stream reached)
        self._eof = False
        # total number of bytes currently held in self._buffer
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # type: (int) -> bytes
        # Keep refilling until we either have 'size' bytes or hit EOF.
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        # type: (int) -> bytes
        # At most ONE refill, so this never blocks waiting for more than a
        # single chunk of data (unlike read()).
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first b'\n' in the newest chunk, -1 if none yet
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        # include the newline itself in what we return
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            # collapse all pending chunks into one before slicing
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            # leftover bytes stay buffered as a single collapsed chunk,
            # which is the invariant readline() relies on
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        # unbuffered os.read() so select()/poll() semantics stay accurate
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data
442
442
443
443
def mmapread(fp, size=None):
    """Return a read-only memory map of *fp* (or ``b''`` for empty requests).

    ``size=None`` maps the whole file.  ``size=0`` returns ``b''`` directly,
    because mmap.mmap() itself treats a length of zero as "the entire file"
    rather than "zero bytes".  Mapping an empty file is impossible, so that
    case also degrades to ``b''`` to keep read-like semantics.
    """
    if size is None:
        # mmap's convention: 0 means "map everything".
        size = 0
    elif size == 0:
        # Honor the literal request for zero bytes (see docstring).
        return b''
    try:
        fd = getattr(fp, 'fileno', lambda: fp)()
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work.
        # Check if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise
460
460
461
461
462 class fileobjectproxy(object):
462 class fileobjectproxy(object):
463 """A proxy around file objects that tells a watcher when events occur.
463 """A proxy around file objects that tells a watcher when events occur.
464
464
465 This type is intended to only be used for testing purposes. Think hard
465 This type is intended to only be used for testing purposes. Think hard
466 before using it in important code.
466 before using it in important code.
467 """
467 """
468
468
469 __slots__ = (
469 __slots__ = (
470 '_orig',
470 '_orig',
471 '_observer',
471 '_observer',
472 )
472 )
473
473
474 def __init__(self, fh, observer):
474 def __init__(self, fh, observer):
475 object.__setattr__(self, '_orig', fh)
475 object.__setattr__(self, '_orig', fh)
476 object.__setattr__(self, '_observer', observer)
476 object.__setattr__(self, '_observer', observer)
477
477
478 def __getattribute__(self, name):
478 def __getattribute__(self, name):
479 ours = {
479 ours = {
480 '_observer',
480 '_observer',
481 # IOBase
481 # IOBase
482 'close',
482 'close',
483 # closed if a property
483 # closed if a property
484 'fileno',
484 'fileno',
485 'flush',
485 'flush',
486 'isatty',
486 'isatty',
487 'readable',
487 'readable',
488 'readline',
488 'readline',
489 'readlines',
489 'readlines',
490 'seek',
490 'seek',
491 'seekable',
491 'seekable',
492 'tell',
492 'tell',
493 'truncate',
493 'truncate',
494 'writable',
494 'writable',
495 'writelines',
495 'writelines',
496 # RawIOBase
496 # RawIOBase
497 'read',
497 'read',
498 'readall',
498 'readall',
499 'readinto',
499 'readinto',
500 'write',
500 'write',
501 # BufferedIOBase
501 # BufferedIOBase
502 # raw is a property
502 # raw is a property
503 'detach',
503 'detach',
504 # read defined above
504 # read defined above
505 'read1',
505 'read1',
506 # readinto defined above
506 # readinto defined above
507 # write defined above
507 # write defined above
508 }
508 }
509
509
510 # We only observe some methods.
510 # We only observe some methods.
511 if name in ours:
511 if name in ours:
512 return object.__getattribute__(self, name)
512 return object.__getattribute__(self, name)
513
513
514 return getattr(object.__getattribute__(self, '_orig'), name)
514 return getattr(object.__getattribute__(self, '_orig'), name)
515
515
516 def __nonzero__(self):
516 def __nonzero__(self):
517 return bool(object.__getattribute__(self, '_orig'))
517 return bool(object.__getattribute__(self, '_orig'))
518
518
519 __bool__ = __nonzero__
519 __bool__ = __nonzero__
520
520
521 def __delattr__(self, name):
521 def __delattr__(self, name):
522 return delattr(object.__getattribute__(self, '_orig'), name)
522 return delattr(object.__getattribute__(self, '_orig'), name)
523
523
524 def __setattr__(self, name, value):
524 def __setattr__(self, name, value):
525 return setattr(object.__getattribute__(self, '_orig'), name, value)
525 return setattr(object.__getattribute__(self, '_orig'), name, value)
526
526
527 def __iter__(self):
527 def __iter__(self):
528 return object.__getattribute__(self, '_orig').__iter__()
528 return object.__getattribute__(self, '_orig').__iter__()
529
529
530 def _observedcall(self, name, *args, **kwargs):
530 def _observedcall(self, name, *args, **kwargs):
531 # Call the original object.
531 # Call the original object.
532 orig = object.__getattribute__(self, '_orig')
532 orig = object.__getattribute__(self, '_orig')
533 res = getattr(orig, name)(*args, **kwargs)
533 res = getattr(orig, name)(*args, **kwargs)
534
534
535 # Call a method on the observer of the same name with arguments
535 # Call a method on the observer of the same name with arguments
536 # so it can react, log, etc.
536 # so it can react, log, etc.
537 observer = object.__getattribute__(self, '_observer')
537 observer = object.__getattribute__(self, '_observer')
538 fn = getattr(observer, name, None)
538 fn = getattr(observer, name, None)
539 if fn:
539 if fn:
540 fn(res, *args, **kwargs)
540 fn(res, *args, **kwargs)
541
541
542 return res
542 return res
543
543
544 def close(self, *args, **kwargs):
544 def close(self, *args, **kwargs):
545 return object.__getattribute__(self, '_observedcall')(
545 return object.__getattribute__(self, '_observedcall')(
546 'close', *args, **kwargs
546 'close', *args, **kwargs
547 )
547 )
548
548
549 def fileno(self, *args, **kwargs):
549 def fileno(self, *args, **kwargs):
550 return object.__getattribute__(self, '_observedcall')(
550 return object.__getattribute__(self, '_observedcall')(
551 'fileno', *args, **kwargs
551 'fileno', *args, **kwargs
552 )
552 )
553
553
554 def flush(self, *args, **kwargs):
554 def flush(self, *args, **kwargs):
555 return object.__getattribute__(self, '_observedcall')(
555 return object.__getattribute__(self, '_observedcall')(
556 'flush', *args, **kwargs
556 'flush', *args, **kwargs
557 )
557 )
558
558
559 def isatty(self, *args, **kwargs):
559 def isatty(self, *args, **kwargs):
560 return object.__getattribute__(self, '_observedcall')(
560 return object.__getattribute__(self, '_observedcall')(
561 'isatty', *args, **kwargs
561 'isatty', *args, **kwargs
562 )
562 )
563
563
564 def readable(self, *args, **kwargs):
564 def readable(self, *args, **kwargs):
565 return object.__getattribute__(self, '_observedcall')(
565 return object.__getattribute__(self, '_observedcall')(
566 'readable', *args, **kwargs
566 'readable', *args, **kwargs
567 )
567 )
568
568
569 def readline(self, *args, **kwargs):
569 def readline(self, *args, **kwargs):
570 return object.__getattribute__(self, '_observedcall')(
570 return object.__getattribute__(self, '_observedcall')(
571 'readline', *args, **kwargs
571 'readline', *args, **kwargs
572 )
572 )
573
573
574 def readlines(self, *args, **kwargs):
574 def readlines(self, *args, **kwargs):
575 return object.__getattribute__(self, '_observedcall')(
575 return object.__getattribute__(self, '_observedcall')(
576 'readlines', *args, **kwargs
576 'readlines', *args, **kwargs
577 )
577 )
578
578
579 def seek(self, *args, **kwargs):
579 def seek(self, *args, **kwargs):
580 return object.__getattribute__(self, '_observedcall')(
580 return object.__getattribute__(self, '_observedcall')(
581 'seek', *args, **kwargs
581 'seek', *args, **kwargs
582 )
582 )
583
583
584 def seekable(self, *args, **kwargs):
584 def seekable(self, *args, **kwargs):
585 return object.__getattribute__(self, '_observedcall')(
585 return object.__getattribute__(self, '_observedcall')(
586 'seekable', *args, **kwargs
586 'seekable', *args, **kwargs
587 )
587 )
588
588
589 def tell(self, *args, **kwargs):
589 def tell(self, *args, **kwargs):
590 return object.__getattribute__(self, '_observedcall')(
590 return object.__getattribute__(self, '_observedcall')(
591 'tell', *args, **kwargs
591 'tell', *args, **kwargs
592 )
592 )
593
593
594 def truncate(self, *args, **kwargs):
594 def truncate(self, *args, **kwargs):
595 return object.__getattribute__(self, '_observedcall')(
595 return object.__getattribute__(self, '_observedcall')(
596 'truncate', *args, **kwargs
596 'truncate', *args, **kwargs
597 )
597 )
598
598
599 def writable(self, *args, **kwargs):
599 def writable(self, *args, **kwargs):
600 return object.__getattribute__(self, '_observedcall')(
600 return object.__getattribute__(self, '_observedcall')(
601 'writable', *args, **kwargs
601 'writable', *args, **kwargs
602 )
602 )
603
603
604 def writelines(self, *args, **kwargs):
604 def writelines(self, *args, **kwargs):
605 return object.__getattribute__(self, '_observedcall')(
605 return object.__getattribute__(self, '_observedcall')(
606 'writelines', *args, **kwargs
606 'writelines', *args, **kwargs
607 )
607 )
608
608
609 def read(self, *args, **kwargs):
609 def read(self, *args, **kwargs):
610 return object.__getattribute__(self, '_observedcall')(
610 return object.__getattribute__(self, '_observedcall')(
611 'read', *args, **kwargs
611 'read', *args, **kwargs
612 )
612 )
613
613
614 def readall(self, *args, **kwargs):
614 def readall(self, *args, **kwargs):
615 return object.__getattribute__(self, '_observedcall')(
615 return object.__getattribute__(self, '_observedcall')(
616 'readall', *args, **kwargs
616 'readall', *args, **kwargs
617 )
617 )
618
618
619 def readinto(self, *args, **kwargs):
619 def readinto(self, *args, **kwargs):
620 return object.__getattribute__(self, '_observedcall')(
620 return object.__getattribute__(self, '_observedcall')(
621 'readinto', *args, **kwargs
621 'readinto', *args, **kwargs
622 )
622 )
623
623
624 def write(self, *args, **kwargs):
624 def write(self, *args, **kwargs):
625 return object.__getattribute__(self, '_observedcall')(
625 return object.__getattribute__(self, '_observedcall')(
626 'write', *args, **kwargs
626 'write', *args, **kwargs
627 )
627 )
628
628
629 def detach(self, *args, **kwargs):
629 def detach(self, *args, **kwargs):
630 return object.__getattribute__(self, '_observedcall')(
630 return object.__getattribute__(self, '_observedcall')(
631 'detach', *args, **kwargs
631 'detach', *args, **kwargs
632 )
632 )
633
633
634 def read1(self, *args, **kwargs):
634 def read1(self, *args, **kwargs):
635 return object.__getattribute__(self, '_observedcall')(
635 return object.__getattribute__(self, '_observedcall')(
636 'read1', *args, **kwargs
636 'read1', *args, **kwargs
637 )
637 )
638
638
639
639
class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _notify(self, event, *args):
        # Forward an event to the observer attached to the underlying
        # fileobjectproxy, if that observer implements a handler for it.
        handler = getattr(self._input._observer, event, None)
        if handler:
            handler(*args)

    def _fillbuffer(self):
        # Perform the buffered fill, then report the raw os.read() to the
        # observer via the dedicated ``osread`` event.
        data = super(observedbufferedinputpipe, self)._fillbuffer()
        self._notify('osread', data, _chunksize)
        return data

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        data = super(observedbufferedinputpipe, self).read(size)
        self._notify('bufferedread', data, size)
        return data

    def readline(self, *args, **kwargs):
        data = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
        self._notify('bufferedreadline', data)
        return data
680
680
681
681
# Socket methods that are intercepted by socketproxy (everything else is
# delegated straight to the wrapped socket via __getattribute__).
PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}


class socketproxy(object):
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        # Bypass our own __setattr__ (which forwards to the wrapped socket)
        # by writing the slots directly on this instance.
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        # Only the proxied methods resolve on the proxy itself; everything
        # else is looked up on the wrapped socket.
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        """Invoke ``name`` on the wrapped socket, then notify the observer.

        The observer hook of the same name (if defined) receives the call's
        result followed by the original arguments.
        """
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        # Bug fix: this previously forwarded the misspelled name
        # 'recv_info', which made any proxied recv_into() raise
        # AttributeError on the underlying socket.
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )
819
819
820
820
class baseproxyobserver(object):
    """Shared plumbing for the file-object and socket observers.

    Stores the log destination (``fh``), a label used to prefix log lines
    (``name``), and the two payload-logging switches (``logdata``,
    ``logdataapis``).
    """

    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        """Append ``data`` to the log, honoring the logging switches."""
        # Payload logging disabled: just terminate the API line (if one
        # was started) and bail out.
        if not self.logdata:
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(b':\n')

        for chunk in data.splitlines(True):
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(chunk))
            )
        self.fh.flush()
856
856
857
857
class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            msg = b'%s> read(%d) -> %d' % (self.name, size, len(res))
            self.fh.write(msg)

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> readline() -> %d' % (self.name, len(res))
            self.fh.write(msg)

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            self.fh.write(msg)

        # res is the number of bytes stored (or None); log that prefix of
        # the destination buffer.
        payload = dest[0:res] if res is not None else b''

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(payload, memoryview):
            payload = payload.tobytes()

        self._writedata(payload)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            msg = b'%s> write(%d) -> %r' % (self.name, len(data), res)
            self.fh.write(msg)

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            self.fh.write(msg)

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            self.fh.write(msg)

        self._writedata(res)
956
956
957
957
def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object.

    ``logh`` receives the log output; ``fh`` is the file object to wrap;
    ``name`` prefixes every logged line. The remaining flags select which
    events and how much payload get logged.
    """
    return fileobjectproxy(
        fh,
        fileobjectobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
972
972
973
973
class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        # State-change events are only logged when requested.
        if self.states:
            self.fh.write(
                b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize)
            )

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            self.fh.write(msg)
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            # res is a (data, address) pair; only the data is logged.
            msg = b'%s> recvfrom(%d, %d) -> %d' % (
                self.name,
                size,
                flags,
                len(res[0]),
            )
            self.fh.write(msg)

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            # res is a (nbytes, address) pair.
            msg = b'%s> recvfrom_into(%d, %d) -> %d' % (
                self.name,
                size,
                flags,
                res[0],
            )
            self.fh.write(msg)

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            msg = b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            self.fh.write(msg)

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        # NOTE(review): unlike the other write-side hooks, this one does not
        # check self.logdataapis before writing the API line; kept as-is to
        # preserve existing behavior.
        msg = b'%s> send(%d, %d) -> %d' % (
            self.name,
            len(data),
            flags,
            len(res),
        )
        self.fh.write(msg)
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            msg = b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            self.fh.write(msg)

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        # sendto() has two signatures: (data, address) and
        # (data, flags, address).
        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            msg = b'%s> sendto(%d, %d, %r) -> %d' % (
                self.name,
                len(data),
                flags,
                address,
                res,
            )
            self.fh.write(msg)

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )
1107
1107
1108
1108
def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket.

    ``logh`` receives the log output; ``fh`` is the socket to wrap; ``name``
    prefixes every logged line. The remaining flags select which events and
    how much payload get logged.
    """
    return socketproxy(
        fh,
        socketobserver(
            logh,
            name,
            reads=reads,
            writes=writes,
            states=states,
            logdata=logdata,
            logdataapis=logdataapis,
        ),
    )
1131
1131
1132
1132
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # Not running from an installed/built package; version is unknown.
        return b'unknown'
    return __version__.version
1141
1141
1142
1142
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()

    # Up to three dotted numeric components, then an optional +/- separator,
    # then everything else as the "extra" suffix.
    match = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not match:
        numeric, extra = b'', v
    elif match.group(2):
        numeric, extra = match.groups()
    else:
        numeric, extra = match.group(1), None

    assert numeric is not None  # help pytype

    parts = []
    for piece in numeric.split(b'.'):
        try:
            parts.append(int(piece))
        except ValueError:
            # Stop at the first non-numeric component.
            break
    # (3, 6) -> (3, 6, None)
    while len(parts) < 3:
        parts.append(None)

    if n == 2:
        return (parts[0], parts[1])
    if n == 3:
        return (parts[0], parts[1], parts[2])
    if n == 4:
        return (parts[0], parts[1], parts[2], extra)
1226
1226
1227
1227
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # A zero-argument callable memoizes a single result; a one-element
        # list doubles as the "already computed?" flag.
        holder = []

        def f():
            if not holder:
                holder.append(func())
            return holder[0]

        return f

    memo = {}
    if argcount == 1:
        # Special case single-argument callables so we never pack/unpack
        # the argument into a tuple on each call.
        def f(arg):
            try:
                return memo[arg]
            except KeyError:
                memo[arg] = func(arg)
                return memo[arg]

    else:

        def f(*args):
            try:
                return memo[args]
            except KeyError:
                memo[args] = func(*args)
                return memo[args]

    return f
1257
1257
1258
1258
class cow(object):
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        outstanding = getattr(self, '_copied', 0)
        if not outstanding:
            # nobody shares this object; writing in place is safe
            return self
        self._copied = outstanding - 1
        # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
        return self.__class__(self)  # pytype: disable=wrong-arg-count

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self
1277
1277
1278
1278
class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> d1
    sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
    """

    def __setitem__(self, key, value):
        # Unlike OrderedDict, re-assigning an existing key moves it to the
        # end: iteration order is "most recently set", not first-inserted.
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = pycompat.iteritems(src)
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        """insert (key, value) so that the new key ends up at ``position``"""
        # Delete and re-add every entry at or after ``position`` so they
        # follow the newly inserted key in iteration order.
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v
1316
1316
1317
1317
class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """

    # Body intentionally empty: all behaviour comes from the ``cow``
    # mix-in and the ``dict`` base class.
1343
1343
1344
1344
class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """

    # Body intentionally empty: combines the ``cow`` mix-in with
    # ``sortdict``'s last-set ordering.
1350
1350
1351
1351
class transactional(object):  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        succeeded = exc_type is None
        try:
            # Only commit on a clean exit; release() always runs and
            # aborts anything left uncommitted.
            if succeeded:
                self.close()
        finally:
            self.release()
1377
1377
1378
1378
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # InterventionRequired is not a failure: commit what was done so
        # far before propagating.  Note the except clause also covers the
        # tr.close() call above, not just the body.
        tr.close()
        raise
    finally:
        # release() aborts the transaction if it was never closed
        tr.release()
1396
1396
1397
1397
@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    """Context manager that does nothing but yield ``enter_result``."""
    yield enter_result
1401
1401
1402
1402
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        # A fresh node is its own neighbour: a one-element circular list.
        self.next = self
        self.prev = self
        # key/value/cost start out in the "empty" state
        self.markempty()

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0
1425
1425
1426
1426
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new code. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        # backing mapping: key -> _lrucachenode
        self._cache = {}

        # The linked list starts as a single self-linked node and is grown
        # lazily (via _addcapacity) up to ``capacity`` nodes.
        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # A lookup counts as an access, so the node becomes the newest.
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            self.totalcost -= node.cost
            del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        # Remove ``k`` and return its value; ``default`` if missing (or
        # re-raise KeyError when no default was given).
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        assert node is not None  # help pytype
        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            assert node is not None  # help pytype
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        # Walk forward from the head, emptying nodes until we hit one that
        # is already empty; the node ring itself is kept for reuse.
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        assert n is not None  # help pytype

        while n.key is _notset:
            n = n.prev

        assert n is not None  # help pytype

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev
1725
1725
1726
1726
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    # results keyed by argument (or argument tuple); ``order`` tracks
    # recency, oldest on the left.
    results = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg in results:
                # cache hit: refresh recency
                order.remove(arg)
            else:
                if len(results) > 20:
                    # evict the least recently used entry
                    del results[order.popleft()]
                results[arg] = func(arg)
            order.append(arg)
            return results[arg]

    else:

        def f(*args):
            if args in results:
                order.remove(args)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[args] = func(*args)
            order.append(args)
            return results[args]

    return f
1756
1756
1757
1757
class propertycache(object):
    """Non-data descriptor caching the wrapped function's result.

    The first attribute access computes the value and stores it in the
    instance ``__dict__`` under the same name, so subsequent accesses
    bypass the descriptor entirely.
    """

    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
1771
1771
1772
1772
def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    # names in __dict__ are native strings, so normalize first
    prop = pycompat.sysstr(prop)
    obj.__dict__.pop(prop, None)
1778
1778
1779
1779
def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        # index of the highest set bit; 0 for x == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    pending = []
    pendingsize = 0
    for piece in source:
        pending.append(piece)
        pendingsize += len(piece)
        if pendingsize >= min:
            if min < max:
                # at least double the threshold; jump further if the
                # accumulated data is already bigger than that
                min = min << 1
                nmin = 1 << log2(pendingsize)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(pending)
            pendingsize = 0
            pending = []
    # flush whatever is left, even if below the threshold
    if pending:
        yield b''.join(pending)
1811
1811
1812
1812
def always(fn):
    """Matcher that accepts everything; the argument is ignored."""
    return True
1815
1815
1816
1816
def never(fn):
    """Matcher that rejects everything; the argument is ignored."""
    return False
1819
1819
1820
1820
def nogc(func):
    """Decorator running *func* with the cyclic garbage collector disabled.

    CPython triggers a collection whenever enough container objects have
    been allocated (see gc.get_threshold()), even for objects marked as
    untracked, which slows down construction of huge containers.  The
    collector's previous state is restored whether *func* returns or
    raises.

    Note: this issue was fixed in CPython 2.7 but the workaround still
    helps CPython performance.
    """

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()

    return wrapper
1845
1845
1846
1846
if pycompat.ispypy:
    # PyPy runs slower with gc disabled; make nogc an identity decorator
    nogc = lambda x: x
1850
1850
1851
1851
def pathto(root, n1, n2):
    # type: (bytes, bytes, bytes) -> bytes
    """return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    """
    if not n1:
        # nothing to be relative to: n2 (converted to local form) is it
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path can exist, so
            # fall back to an absolute path under root
            return os.path.join(root, localpath(n2))
        n2 = b'/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split(b'/')
    a.reverse()
    b.reverse()
    # strip the components both paths share, from the front
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1878
1878
1879
1879
def checksignature(func, depth=1):
    """Wrap *func* to translate signature mismatches into SignatureError.

    A TypeError whose traceback is exactly *depth* frames deep originated
    from the call itself (wrong argument count or names) rather than from
    code running inside *func*; only that case is translated.
    """

    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) != depth:
                raise
            raise error.SignatureError

    return check
1892
1892
1893
1893
# a whitelist of known filesystems where hardlink works reliably
_hardlinkfswhitelist = {
    b'apfs',
    b'btrfs',
    b'ext2',
    b'ext3',
    b'ext4',
    b'hfs',
    b'jfs',
    b'NTFS',
    b'reiserfs',
    b'tmpfs',
    b'ufs',
    b'xfs',
    b'zfs',
}
1910
1910
1911
1911
def copyfile(
    src, dest, hardlink=False, copystat=False, checkambig=False, nb_bytes=None
):
    """copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.

    nb_bytes: if set only copy the first `nb_bytes` of the source file.
    """
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember dest's old stat so mtime ambiguity can be detected
            # after the copy (see isambig check below)
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            if nb_bytes is not None:
                # truncating a hardlink would also truncate src;
                # ProgrammingError is not an IOError/OSError, so it is not
                # swallowed by the except clause below
                m = "the `nb_bytes` argument is incompatible with `hardlink`"
                raise error.ProgrammingError(m)
            return
        except (IOError, OSError):
            pass  # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
        if nb_bytes is not None:
            m = "cannot use `nb_bytes` on a symlink"
            raise error.ProgrammingError(m)
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat.frompath(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (
                        oldstat.stat[stat.ST_MTIME] + 1
                    ) & 0x7FFFFFFF
                    os.utime(dest, (advanced, advanced))
            # We could do something smarter using `copy_file_range` call or similar
            if nb_bytes is not None:
                with open(dest, mode='r+') as f:
                    f.truncate(nb_bytes)
        except shutil.Error as inst:
            raise error.Abort(stringutil.forcebytestr(inst))
1979
1979
1980
1980
def copyfiles(src, dst, hardlink=None, progress=None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking is still viable
    for subsequent files, and how many files were processed.  A
    ``hardlink`` of None means "decide automatically" by comparing the
    device numbers of src and dst.
    """
    num = 0

    def settopic():
        # pick the progress topic once the hardlink decision is made
        if progress:
            progress.topic = _(b'linking') if hardlink else _(b'copying')

    if os.path.isdir(src):
        if hardlink is None:
            # os.link only works within a single filesystem
            hardlink = (
                os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
            num += n
    else:
        if hardlink is None:
            hardlink = (
                os.stat(os.path.dirname(src)).st_dev
                == os.stat(os.path.dirname(dst)).st_dev
            )
        settopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError) as exc:
                # EEXIST only means this particular dst already exists, so
                # keep trying hardlinks for the remaining files; any other
                # error means hardlinking does not work here at all
                if exc.errno != errno.EEXIST:
                    hardlink = False
                # XXX maybe try to relink if the file exist ?
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        if progress:
            progress.increment()

    return hardlink, num
2022
2024
2023
2025
# base filenames Windows refuses regardless of extension (matched lowercased)
_winreservednames = {
    b'con',
    b'prn',
    b'aux',
    b'nul',
    b'com1',
    b'com2',
    b'com3',
    b'com4',
    b'com5',
    b'com6',
    b'com7',
    b'com8',
    b'com9',
    b'lpt1',
    b'lpt2',
    b'lpt3',
    b'lpt4',
    b'lpt5',
    b'lpt6',
    b'lpt7',
    b'lpt8',
    b'lpt9',
}
# characters that may not appear anywhere in a Windows filename
_winreservedchars = b':*?"<>|'
2049
2051
2050
2052
def checkwinfilename(path):
    # type: (bytes) -> Optional[bytes]
    r"""Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename(b"just/a/normal/path")
    >>> checkwinfilename(b"foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/xml.con")
    >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename(b"foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename(b"foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename(b"../bar")
    >>> checkwinfilename(b"foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename(b"foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    """
    if path.endswith(b'\\'):
        return _(b"filename ends with '\\', which is invalid on Windows")
    if b'\\/' in path:
        return _(b"directory name ends with '\\', which is invalid on Windows")
    # examine each path component separately (either separator accepted)
    for n in path.replace(b'\\', b'/').split(b'/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return (
                    _(
                        b"filename contains '%s', which is reserved "
                        b"on Windows"
                    )
                    % c
                )
            if ord(c) <= 31:
                # control characters are rejected by Windows
                return _(
                    b"filename contains '%s', which is invalid on Windows"
                ) % stringutil.escapestr(c)
        # reserved device names apply to the part before the first dot
        base = n.split(b'.')[0]
        if base and base.lower() in _winreservednames:
            return (
                _(b"filename contains '%s', which is reserved on Windows")
                % base
            )
        t = n[-1:]
        # a trailing dot or space is silently dropped by Windows ('..' is ok)
        if t in b'. ' and n not in b'..':
            return (
                _(
                    b"filename ends with '%s', which is not allowed "
                    b"on Windows"
                )
                % t
            )
2111
2113
2112
2114
# monotonic high-resolution timer; time.perf_counter exists on Python >= 3.3,
# so the per-platform fallbacks below only matter on older interpreters
timer = getattr(time, "perf_counter", None)

if pycompat.iswindows:
    checkosfilename = checkwinfilename
    if not timer:
        timer = time.clock
else:
    # mercurial.windows doesn't have platform.checkosfilename
    checkosfilename = platform.checkosfilename  # pytype: disable=module-attr
    if not timer:
        timer = time.time
2124
2126
2125
2127
def makelock(info, pathname):
    """Atomically create a lock file at *pathname* holding *info*.

    A symlink whose target encodes *info* is preferred because its
    creation is atomic.  Platforms without os.symlink fall back to
    exclusive file creation, which may leave a stale lock file behind if
    a signal interrupts the write.
    """
    try:
        return os.symlink(info, pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        # an already-held lock is the caller's problem; any other symlink
        # failure means we should fall through to the plain-file scheme
        if why.errno == errno.EEXIST:
            raise

    openflags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    openflags |= getattr(os, 'O_BINARY', 0)
    fd = os.open(pathname, openflags)
    os.write(fd, info)
    os.close(fd)
2144
2146
2145
2147
def readlock(pathname):
    # type: (bytes) -> bytes
    """Return the content of the lock at *pathname*.

    Symlink-based locks keep their data in the link target; where the
    lock is a regular file (or symlinks are unsupported) fall back to
    reading the file's contents.
    """
    try:
        return readlink(pathname)
    except AttributeError:  # no symlink in os
        pass
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    with posixfile(pathname, b'rb') as fp:
        return fp.read()
2157
2159
2158
2160
def fstat(fp):
    """stat a file object, falling back to its path when it lacks fileno()"""
    try:
        fileno = fp.fileno
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fileno())
2165
2167
2166
2168
2167 # File system features
2169 # File system features
2168
2170
2169
2171
def fscasesensitive(path):
    # type: (bytes) -> bool
    """Return True if *path* lives on a case-sensitive filesystem.

    Requires a path (like /foo/.hg) whose final component contains at
    least one case-foldable character; when nothing can be folded, case
    sensitivity cannot be disproven and True is returned.
    """
    origstat = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True  # no evidence against case sensitivity
    try:
        otherstat = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # the case-folded spelling does not resolve: sensitive
        return True
    return otherstat != origstat
2193
2195
2194
2196
# identity by default; swapped to pycompat.sysstr when the installed re2
# binding only accepts native strings (see _re._checkre2)
_re2_input = lambda x: x
try:
    import re2  # pytype: disable=import-error

    # None means "importable but not yet validated"; _re._checkre2
    # lazily settles it to True/False
    _re2 = None
except ImportError:
    _re2 = False
2202
2204
2203
2205
class _re(object):
    """Regex facade preferring the optional re2 engine over stdlib re."""

    def _checkre2(self):
        # Probe re2 once and cache the verdict in the module-level _re2
        # flag; _re2_input is swapped to sysstr for bindings that only
        # accept native strings.
        global _re2
        global _re2_input

        check_pattern = br'\[([^\[]+)\]'
        check_input = b'[ui]'
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(check_pattern, check_input))
        except ImportError:
            _re2 = False
        except TypeError:
            # the `pyre-2` project provides a re2 module that accept bytes
            # the `fb-re2` project provides a re2 module that acccept sysstr
            check_pattern = pycompat.sysstr(check_pattern)
            check_input = pycompat.sysstr(check_input)
            _re2 = bool(re2.match(check_pattern, check_input))
            _re2_input = pycompat.sysstr

    def compile(self, pat, flags=0):
        """Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE."""
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes inline flags rather than a flags argument
            if flags & remod.IGNORECASE:
                pat = b'(?i)' + pat
            if flags & remod.MULTILINE:
                pat = b'(?m)' + pat
            try:
                return re2.compile(_re2_input(pat))
            except re2.error:
                # pattern uses features re2 does not support
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        """Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        """
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
2257
2259
2258
2260
# shared regex facade: transparently prefers re2 when it is usable
re = _re()

# fspath() cache: directory path -> {normcased name: on-disk name}
_fspathcache = {}
2262
2264
2263
2265
def fspath(name, root):
    # type: (bytes, bytes) -> bytes
    """Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    """

    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk spelling for one directory
        return {normcase(n): n for n in os.listdir(dir)}

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # NOTE(review): the return value of replace() is discarded, so this
    # line looks like a no-op — verify whether seps should be reassigned.
    seps.replace(b'\\', b'\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return b''.join(result)
2307
2309
2308
2310
def checknlink(testfile):
    # type: (bytes) -> bool
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1, f2, fp = None, None, None
    try:
        fd, f1 = pycompat.mkstemp(
            prefix=b'.%s-' % os.path.basename(testfile),
            suffix=b'1~',
            dir=os.path.dirname(testfile),
        )
        os.close(fd)
        # derive the sibling name by swapping the trailing "1~" for "2~"
        f2 = b'%s2~' % f1[:-2]

        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fp = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # mkstemp/oslink failing means hardlinks are not dependable here
        return False
    finally:
        if fp is not None:
            fp.close()
        # best-effort cleanup of both scratch files
        for f in (f1, f2):
            try:
                if f is not None:
                    os.unlink(f)
            except OSError:
                pass
2341
2343
2342
2344
def endswithsep(path):
    # type: (bytes) -> bool
    """Return True if *path* ends with os.sep or (when defined) os.altsep."""
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    return bool(altsep and path.endswith(altsep))
2351
2353
2352
2354
def splitpath(path):
    # type: (bytes) -> List[bytes]
    """Split *path* on os.sep only.

    os.altsep is deliberately ignored: this is merely a shorthand for
    ``path.split(os.sep)``.  Run os.path.normpath() on *path* first if
    normalization is needed.
    """
    return path.split(pycompat.ossep)
2361
2363
2362
2364
def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode, enforcewritable)

    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, b"rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # source does not exist: the empty temp file suffices
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, b"wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except:  # re-raises
        # never leave a partial temp file behind, whatever went wrong
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
2404
2406
2405
2407
2406 class filestat(object):
2408 class filestat(object):
2407 """help to exactly detect change of a file
2409 """help to exactly detect change of a file
2408
2410
2409 'stat' attribute is result of 'os.stat()' if specified 'path'
2411 'stat' attribute is result of 'os.stat()' if specified 'path'
2410 exists. Otherwise, it is None. This can avoid preparative
2412 exists. Otherwise, it is None. This can avoid preparative
2411 'exists()' examination on client side of this class.
2413 'exists()' examination on client side of this class.
2412 """
2414 """
2413
2415
2414 def __init__(self, stat):
2416 def __init__(self, stat):
2415 self.stat = stat
2417 self.stat = stat
2416
2418
2417 @classmethod
2419 @classmethod
2418 def frompath(cls, path):
2420 def frompath(cls, path):
2419 try:
2421 try:
2420 stat = os.stat(path)
2422 stat = os.stat(path)
2421 except OSError as err:
2423 except OSError as err:
2422 if err.errno != errno.ENOENT:
2424 if err.errno != errno.ENOENT:
2423 raise
2425 raise
2424 stat = None
2426 stat = None
2425 return cls(stat)
2427 return cls(stat)
2426
2428
2427 @classmethod
2429 @classmethod
2428 def fromfp(cls, fp):
2430 def fromfp(cls, fp):
2429 stat = os.fstat(fp.fileno())
2431 stat = os.fstat(fp.fileno())
2430 return cls(stat)
2432 return cls(stat)
2431
2433
2432 __hash__ = object.__hash__
2434 __hash__ = object.__hash__
2433
2435
2434 def __eq__(self, old):
2436 def __eq__(self, old):
2435 try:
2437 try:
2436 # if ambiguity between stat of new and old file is
2438 # if ambiguity between stat of new and old file is
2437 # avoided, comparison of size, ctime and mtime is enough
2439 # avoided, comparison of size, ctime and mtime is enough
2438 # to exactly detect change of a file regardless of platform
2440 # to exactly detect change of a file regardless of platform
2439 return (
2441 return (
2440 self.stat.st_size == old.stat.st_size
2442 self.stat.st_size == old.stat.st_size
2441 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2443 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2442 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2444 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2443 )
2445 )
2444 except AttributeError:
2446 except AttributeError:
2445 pass
2447 pass
2446 try:
2448 try:
2447 return self.stat is None and old.stat is None
2449 return self.stat is None and old.stat is None
2448 except AttributeError:
2450 except AttributeError:
2449 return False
2451 return False
2450
2452
2451 def isambig(self, old):
2453 def isambig(self, old):
2452 """Examine whether new (= self) stat is ambiguous against old one
2454 """Examine whether new (= self) stat is ambiguous against old one
2453
2455
2454 "S[N]" below means stat of a file at N-th change:
2456 "S[N]" below means stat of a file at N-th change:
2455
2457
2456 - S[n-1].ctime < S[n].ctime: can detect change of a file
2458 - S[n-1].ctime < S[n].ctime: can detect change of a file
2457 - S[n-1].ctime == S[n].ctime
2459 - S[n-1].ctime == S[n].ctime
2458 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2460 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2459 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2461 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2460 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2462 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2461 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2463 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2462
2464
2463 Case (*2) above means that a file was changed twice or more at
2465 Case (*2) above means that a file was changed twice or more at
2464 same time in sec (= S[n-1].ctime), and comparison of timestamp
2466 same time in sec (= S[n-1].ctime), and comparison of timestamp
2465 is ambiguous.
2467 is ambiguous.
2466
2468
2467 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2469 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2468 timestamp is ambiguous".
2470 timestamp is ambiguous".
2469
2471
2470 But advancing mtime only in case (*2) doesn't work as
2472 But advancing mtime only in case (*2) doesn't work as
2471 expected, because naturally advanced S[n].mtime in case (*1)
2473 expected, because naturally advanced S[n].mtime in case (*1)
2472 might be equal to manually advanced S[n-1 or earlier].mtime.
2474 might be equal to manually advanced S[n-1 or earlier].mtime.
2473
2475
2474 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2476 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2475 treated as ambiguous regardless of mtime, to avoid overlooking
2477 treated as ambiguous regardless of mtime, to avoid overlooking
2476 by confliction between such mtime.
2478 by confliction between such mtime.
2477
2479
2478 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2480 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2479 S[n].mtime", even if size of a file isn't changed.
2481 S[n].mtime", even if size of a file isn't changed.
2480 """
2482 """
2481 try:
2483 try:
2482 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2484 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2483 except AttributeError:
2485 except AttributeError:
2484 return False
2486 return False
2485
2487
2486 def avoidambig(self, path, old):
2488 def avoidambig(self, path, old):
2487 """Change file stat of specified path to avoid ambiguity
2489 """Change file stat of specified path to avoid ambiguity
2488
2490
2489 'old' should be previous filestat of 'path'.
2491 'old' should be previous filestat of 'path'.
2490
2492
2491 This skips avoiding ambiguity, if a process doesn't have
2493 This skips avoiding ambiguity, if a process doesn't have
2492 appropriate privileges for 'path'. This returns False in this
2494 appropriate privileges for 'path'. This returns False in this
2493 case.
2495 case.
2494
2496
2495 Otherwise, this returns True, as "ambiguity is avoided".
2497 Otherwise, this returns True, as "ambiguity is avoided".
2496 """
2498 """
2497 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2499 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2498 try:
2500 try:
2499 os.utime(path, (advanced, advanced))
2501 os.utime(path, (advanced, advanced))
2500 except OSError as inst:
2502 except OSError as inst:
2501 if inst.errno == errno.EPERM:
2503 if inst.errno == errno.EPERM:
2502 # utime() on the file created by another user causes EPERM,
2504 # utime() on the file created by another user causes EPERM,
2503 # if a process doesn't have appropriate privileges
2505 # if a process doesn't have appropriate privileges
2504 return False
2506 return False
2505 raise
2507 raise
2506 return True
2508 return True
2507
2509
2508 def __ne__(self, other):
2510 def __ne__(self, other):
2509 return not self == other
2511 return not self == other
2510
2512
2511
2513
class atomictempfile(object):
    """writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    """

    def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
        self.__name = name  # permanent name
        # the temp copy inherits the target's mode; 'w' in mode means the
        # content will be rewritten anyway, so skip copying it (emptyok)
        self._tempname = mktempcopy(
            name,
            emptyok=(b'w' in mode),
            createmode=createmode,
            enforcewritable=(b'w' in mode),
        )

        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # Rename the temp file over the permanent name.  When checkambig
        # is set, record the target's stat *before* the rename so that a
        # timestamp ambiguity (same ctime) can be detected and broken by
        # advancing mtime by one second afterwards.
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
                    os.utime(filename, (advanced, advanced))
            else:
                # no ambiguity check requested, or the target is new
                rename(self._tempname, filename)

    def discard(self):
        # Drop all pending writes: delete the temp file (best effort) and
        # close the handle without touching the permanent name.
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'):  # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on a clean exit, throw the writes away on an exception
        if exctype is not None:
            self.discard()
        else:
            self.close()
def unlinkpath(f, ignoremissing=False, rmdir=True):
    # type: (bytes, bool, bool) -> None
    """unlink and remove the directory if it is empty"""
    if ignoremissing:
        tryunlink(f)
    else:
        unlink(f)
    if not rmdir:
        return
    # opportunistically prune parent directories that are now empty
    try:
        removedirs(os.path.dirname(f))
    except OSError:
        pass
def tryunlink(f):
    # type: (bytes) -> None
    """Attempt to remove a file, ignoring ENOENT errors."""
    try:
        unlink(f)
    except OSError as e:
        # a missing file is fine; anything else is a real failure
        if e.errno == errno.ENOENT:
            return
        raise
def makedirs(name, mode=None, notindexed=False):
    # type: (bytes, Optional[int], bool) -> None
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            # already there: nothing to do, and no chmod either
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors first, then retry this directory
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as exc:
            # catch EEXIST to handle creation races with other processes
            if exc.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
def readfile(path):
    # type: (bytes) -> bytes
    """Return the full binary content of ``path``."""
    with open(path, b'rb') as fp:
        return fp.read()
def writefile(path, text):
    # type: (bytes, bytes) -> None
    """Write ``text`` to ``path``, replacing any existing content."""
    with open(path, b'wb') as fp:
        fp.write(text)
def appendfile(path, text):
    # type: (bytes, bytes) -> None
    """Append ``text`` to the end of the file at ``path``."""
    with open(path, b'ab') as fp:
        fp.write(text)
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""

        def splitbig(chunks):
            # Break chunks larger than 1 MiB into 256 KiB pieces so a
            # single huge chunk can't dominate memory in the read queue.
            for chunk in chunks:
                if len(chunk) > 2 ** 20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk

        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return b''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                # pull roughly 256 KiB ahead of what the caller asked for
                target = 2 ** 18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source exhausted; return what we have (may be short)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset : offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return b''.join(buf)
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        # never request more than the remaining limit allows
        nbytes = size if limit is None else min(limit, size)
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
class cappedreader(object):
    """A file object proxy that allows reading up to N bytes.

    Given a source file object, instances of this type allow reading up to
    N bytes from that source file object. Attempts to read past the allowed
    limit are treated as EOF.

    It is assumed that I/O is not performed on the original file object
    in addition to I/O that is performed by this instance. If there is,
    state tracking will get out of sync and unexpected results will ensue.
    """

    def __init__(self, fh, limit):
        """Allow reading up to <limit> bytes from <fh>."""
        self._fh = fh
        self._left = limit

    def read(self, n=-1):
        remaining = self._left
        if not remaining:
            # cap reached: behave like EOF
            return b''

        if n < 0:
            n = remaining

        data = self._fh.read(min(n, remaining))
        self._left = remaining - len(data)
        assert self._left >= 0

        return data

    def readinto(self, b):
        res = self.read(len(b))
        if res is None:
            return None

        b[0 : len(res)] = res
        return len(res)
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        magnitude = abs(count)
        # rows are ordered largest-unit first; first match wins
        for multiplier, divisor, fmt in unittable:
            if magnitude >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the smallest unit
        return unittable[-1][2] % count

    return go
def processlinerange(fromline, toline):
    # type: (int, int) -> Tuple[int, int]
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
    ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
    ...
    ParseError: fromline must be strictly positive
    """
    if fromline > toline:
        raise error.ParseError(_(b"line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_(b"fromline must be strictly positive"))
    # convert to a 0-based, half-open range
    return fromline - 1, toline
# Render a byte quantity with a human-readable unit suffix, e.g.
# bytecount(1048576) -> '1.00 MB'.  Rows are ordered from the largest
# unit/precision down to plain bytes; the first matching row is used.
bytecount = unitcountfn(
    (100, 1 << 30, _(b'%.0f GB')),
    (10, 1 << 30, _(b'%.1f GB')),
    (1, 1 << 30, _(b'%.2f GB')),
    (100, 1 << 20, _(b'%.0f MB')),
    (10, 1 << 20, _(b'%.1f MB')),
    (1, 1 << 20, _(b'%.2f MB')),
    (100, 1 << 10, _(b'%.0f KB')),
    (10, 1 << 10, _(b'%.1f KB')),
    (1, 1 << 10, _(b'%.2f KB')),
    (1, 1, _(b'%.0f bytes')),
)
class transformingwriter(object):
    """Writable file wrapper to transform data by function"""

    def __init__(self, fp, encode):
        # underlying file object and the bytes -> bytes transform
        self._fp = fp
        self._encode = encode

    def close(self):
        self._fp.close()

    def flush(self):
        self._fp.flush()

    def write(self, data):
        encoded = self._encode(data)
        return self._fp.write(encoded)
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.  Used by the tolf()/tocrlf() conversion
# helpers below.
_eolre = remod.compile(br'\r*\n')
def tolf(s):
    # type: (bytes) -> bytes
    """Normalize every end-of-line in ``s`` to a bare LF."""
    return _eolre.sub(b'\n', s)
def tocrlf(s):
    # type: (bytes) -> bytes
    """Normalize every end-of-line in ``s`` to CRLF."""
    return _eolre.sub(b'\r\n', s)
def _crlfwriter(fp):
    """Wrap ``fp`` so written data has its line endings converted to CRLF."""
    return transformingwriter(fp, tocrlf)
# Pick the EOL conversion helpers that match the host platform's native
# line separator: where os.linesep is CRLF (Windows) conversions are
# real, everywhere else they are identity no-ops.
if pycompat.oslinesep == b'\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
    nativeeolwriter = _crlfwriter
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
    nativeeolwriter = pycompat.identity
2894 if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
2896 if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
2895 3,
2897 3,
2896 0,
2898 0,
2897 ):
2899 ):
2898 # There is an issue in CPython that some IO methods do not handle EINTR
2900 # There is an issue in CPython that some IO methods do not handle EINTR
2899 # correctly. The following table shows what CPython version (and functions)
2901 # correctly. The following table shows what CPython version (and functions)
2900 # are affected (buggy: has the EINTR bug, okay: otherwise):
2902 # are affected (buggy: has the EINTR bug, okay: otherwise):
2901 #
2903 #
2902 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2904 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2903 # --------------------------------------------------
2905 # --------------------------------------------------
2904 # fp.__iter__ | buggy | buggy | okay
2906 # fp.__iter__ | buggy | buggy | okay
2905 # fp.read* | buggy | okay [1] | okay
2907 # fp.read* | buggy | okay [1] | okay
2906 #
2908 #
2907 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2909 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2908 #
2910 #
2909 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2911 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2910 # like "read*" work fine, as we do not support Python < 2.7.4.
2912 # like "read*" work fine, as we do not support Python < 2.7.4.
2911 #
2913 #
2912 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2914 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2913 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2915 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2914 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2916 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2915 # fp.__iter__ but not other fp.read* methods.
2917 # fp.__iter__ but not other fp.read* methods.
2916 #
2918 #
2917 # On modern systems like Linux, the "read" syscall cannot be interrupted
2919 # On modern systems like Linux, the "read" syscall cannot be interrupted
2918 # when reading "fast" files like on-disk files. So the EINTR issue only
2920 # when reading "fast" files like on-disk files. So the EINTR issue only
2919 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2921 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2920 # files approximately as "fast" files and use the fast (unsafe) code path,
2922 # files approximately as "fast" files and use the fast (unsafe) code path,
2921 # to minimize the performance impact.
2923 # to minimize the performance impact.
2922
2924
def iterfile(fp):
    """Return a line iterator over fp that is safe against EINTR.

    Regular on-disk (S_ISREG) files keep the fast native iterator, since
    "read" on such files is not interruptible on modern systems; anything
    else (pipes, sockets, ttys) falls back to iter(fp.readline, b''),
    which handles EINTR correctly on buggy CPython 2 versions.
    """
    safe_to_iterate = True
    if type(fp) is file:
        safe_to_iterate = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
    if not safe_to_iterate:
        # fp.readline deals with EINTR correctly, use it as a workaround.
        return iter(fp.readline, b'')
    return fp
2932
2934
2933
2935
2934 else:
2936 else:
2935 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2937 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
def iterfile(fp):
    # PyPy and CPython 3 do not have the EINTR iteration bug, so the file
    # object itself is already a safe line iterator — return it unchanged.
    return fp
2938
2940
2939
2941
def iterlines(iterator):
    # type: (Iterator[bytes]) -> Iterator[bytes]
    """Flatten an iterable of byte chunks into individual lines."""
    for data in iterator:
        for piece in data.splitlines():
            yield piece
2945
2947
2946
2948
def expandpath(path):
    # type: (bytes) -> bytes
    """Expand environment variables, then ``~`` constructs, in path."""
    with_vars = os.path.expandvars(path)
    return os.path.expanduser(with_vars)
2950
2952
2951
2953
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = b'|'.join(mapping.keys())
    if escape_prefix:
        patterns += b'|' + prefix
        if len(prefix) > 1:
            # prefix is regex-escaped (e.g. br'\$'); the literal character
            # being doubled is the part after the backslash.
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # Work on a copy: previously the escape entry was inserted into the
        # caller's dict, mutating an argument as a side effect.
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(br'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2976
2978
2977
2979
def getport(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'getport(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.getport(*args, **kwargs)
2982
2984
2983
2985
def url(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'url(...) moved to mercurial.utils.urlutil', b'6.0', stacklevel=2
    )
    return urlutil.url(*args, **kwargs)
2988
2990
2989
2991
def hasscheme(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'hasscheme(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hasscheme(*args, **kwargs)
2994
2996
2995
2997
def hasdriveletter(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'hasdriveletter(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hasdriveletter(*args, **kwargs)
3000
3002
3001
3003
def urllocalpath(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'urllocalpath(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.urllocalpath(*args, **kwargs)
3006
3008
3007
3009
def checksafessh(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'checksafessh(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.checksafessh(*args, **kwargs)
3012
3014
3013
3015
def hidepassword(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'hidepassword(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.hidepassword(*args, **kwargs)
3018
3020
3019
3021
def removeauth(*args, **kwargs):
    # Deprecated shim kept for backward compatibility; the implementation
    # now lives in mercurial.utils.urlutil.
    nouideprecwarn(
        b'removeauth(...) moved to mercurial.utils.urlutil',
        b'6.0',
        stacklevel=2,
    )
    return urlutil.removeauth(*args, **kwargs)
3024
3026
3025
3027
# Pretty-print a duration (in seconds) with a unit — s, ms, us or ns —
# chosen via unitcountfn so the rendered value keeps a readable number of
# significant digits. Each entry is (threshold, divisor, format string).
timecount = unitcountfn(
    (1, 1e3, _(b'%.0f s')),
    (100, 1, _(b'%.1f s')),
    (10, 1, _(b'%.2f s')),
    (1, 1, _(b'%.3f s')),
    (100, 0.001, _(b'%.1f ms')),
    (10, 0.001, _(b'%.2f ms')),
    (1, 0.001, _(b'%.3f ms')),
    (100, 0.000001, _(b'%.1f us')),
    (10, 0.000001, _(b'%.2f us')),
    (1, 0.000001, _(b'%.3f us')),
    (100, 0.000000001, _(b'%.1f ns')),
    (10, 0.000000001, _(b'%.2f ns')),
    (1, 0.000000001, _(b'%.3f ns')),
)
3041
3043
3042
3044
@attr.s
class timedcmstats(object):
    """Stats information produced by the timedcm context manager on entering."""

    # the starting value of the timer as a float (meaning and resolution is
    # platform dependent, see util.timer)
    start = attr.ib(default=attr.Factory(lambda: timer()))
    # the number of seconds as a floating point value; starts at 0, updated when
    # the context is exited.
    elapsed = attr.ib(default=0)
    # the number of nested timedcm context managers.
    level = attr.ib(default=1)

    def __bytes__(self):
        # Before the context exits, elapsed is still 0 and the duration is
        # not meaningful yet, hence the '<unknown>' placeholder.
        return timecount(self.elapsed) if self.elapsed else b'<unknown>'

    __str__ = encoding.strmethod(__bytes__)
3060
3062
3061
3063
@contextlib.contextmanager
def timedcm(whencefmt, *whenceargs):
    """A context manager that produces timing information for a given context.

    On entering a timedcmstats instance is produced.

    This context manager is reentrant.

    """
    # track nested context managers
    timedcm._nested += 1
    timing_stats = timedcmstats(level=timedcm._nested)
    try:
        with tracing.log(whencefmt, *whenceargs):
            yield timing_stats
    finally:
        # elapsed becomes valid only once the context exits; the nesting
        # counter must be decremented even when the body raised.
        timing_stats.elapsed = timer() - timing_stats.start
        timedcm._nested -= 1


# Function attribute used as the (module-global) nesting depth counter.
timedcm._nested = 0
3083
3085
3084
3086
def timed(func):
    """Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    """

    def wrapper(*args, **kwargs):
        name = pycompat.bytestr(func.__name__)
        with timedcm(name) as time_stats:
            result = func(*args, **kwargs)
        # Indent proportionally to nesting depth so nested timed calls
        # render as a tree on stderr.
        indent = b' ' * time_stats.level * 2
        procutil.stderr.write(b'%s%s: %s\n' % (indent, name, time_stats))
        return result

    return wrapper
3111
3113
3112
3114
# Size suffixes understood by sizetoint(), as (lower-case suffix, byte
# multiplier) pairs tried in order. The bare b'b' entry must stay last so
# that two-letter suffixes such as b'kb' are matched before it.
_sizeunits = (
    (b'm', 2 ** 20),
    (b'k', 2 ** 10),
    (b'g', 2 ** 30),
    (b'kb', 2 ** 10),
    (b'mb', 2 ** 20),
    (b'gb', 2 ** 30),
    (b'b', 1),
)
3122
3124
3123
3125
def sizetoint(s):
    # type: (bytes) -> int
    """Convert a space specifier to a byte count.

    >>> sizetoint(b'30')
    30
    >>> sizetoint(b'2.2kb')
    2252
    >>> sizetoint(b'6M')
    6291456
    """
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[: -len(suffix)]) * multiplier)
        # No recognized suffix: a plain integer count of bytes.
        return int(spec)
    except ValueError:
        raise error.ParseError(_(b"couldn't parse size: %s") % s)
3143
3145
3144
3146
class hooks(object):
    """A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources."""

    def __init__(self):
        # List of (source, callable) pairs; kept unsorted until invocation.
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort lazily at call time so registration order never matters.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hook(*args) for _source, hook in self._hooks]
3162
3164
3163
3165
def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
    """Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    """
    # Drop the innermost 'skip' frames plus this function's own frame, then
    # keep only the last 'depth' entries (depth=0 keeps everything, since
    # [-0:] is the whole list).
    entries = [
        (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
        for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
    ][-depth:]
    if entries:
        # Pad file:line to the widest entry so the 'in <func>' column aligns.
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
3187
3189
3188
3190
def debugstacktrace(
    msg=b'stacktrace',
    skip=0,
    f=procutil.stderr,
    otherf=procutil.stdout,
    depth=0,
    prefix=b'',
):
    """Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    """
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
    # skip + 1 also hides this helper's own stack frame from the output.
    for line in getstackframes(skip + 1, depth=depth):
        f.write(prefix + line)
    f.flush()


# convenient shortcut
dst = debugstacktrace
3213
3215
3214
3216
3215 def safename(f, tag, ctx, others=None):
3217 def safename(f, tag, ctx, others=None):
3216 """
3218 """
3217 Generate a name that it is safe to rename f to in the given context.
3219 Generate a name that it is safe to rename f to in the given context.
3218
3220
3219 f: filename to rename
3221 f: filename to rename
3220 tag: a string tag that will be included in the new name
3222 tag: a string tag that will be included in the new name
3221 ctx: a context, in which the new name must not exist
3223 ctx: a context, in which the new name must not exist
3222 others: a set of other filenames that the new name must not be in
3224 others: a set of other filenames that the new name must not be in
3223
3225
3224 Returns a file name of the form oldname~tag[~number] which does not exist
3226 Returns a file name of the form oldname~tag[~number] which does not exist
3225 in the provided context and is not in the set of other names.
3227 in the provided context and is not in the set of other names.
3226 """
3228 """
3227 if others is None:
3229 if others is None:
3228 others = set()
3230 others = set()
3229
3231
3230 fn = b'%s~%s' % (f, tag)
3232 fn = b'%s~%s' % (f, tag)
3231 if fn not in ctx and fn not in others:
3233 if fn not in ctx and fn not in others:
3232 return fn
3234 return fn
3233 for n in itertools.count(1):
3235 for n in itertools.count(1):
3234 fn = b'%s~%s~%s' % (f, tag, n)
3236 fn = b'%s~%s~%s' % (f, tag, n)
3235 if fn not in ctx and fn not in others:
3237 if fn not in ctx and fn not in others:
3236 return fn
3238 return fn
3237
3239
3238
3240
def readexactly(stream, n):
    '''read n bytes from stream.read and abort if less was available'''
    data = stream.read(n)
    # read() never returns more than n bytes, so anything but an exact
    # match means the stream ended early.
    if len(data) == n:
        return data
    raise error.Abort(
        _(b"stream ended unexpectedly (got %d bytes, expected %d)")
        % (len(data), n)
    )
3248
3250
3249
3251
def uvarintencode(value):
    """Encode an unsigned integer value to a varint.

    A varint is a variable length integer of 1 or more bytes. Each byte
    except the last has the most significant bit set. The lower 7 bits of
    each byte store the 2's complement representation, least significant group
    first.

    >>> uvarintencode(0)
    '\\x00'
    >>> uvarintencode(1)
    '\\x01'
    >>> uvarintencode(127)
    '\\x7f'
    >>> uvarintencode(1337)
    '\\xb9\\n'
    >>> uvarintencode(65536)
    '\\x80\\x80\\x04'
    >>> uvarintencode(-1)
    Traceback (most recent call last):
        ...
    ProgrammingError: negative value for uvarint: -1
    """
    if value < 0:
        raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
    # Emit 7 bits per byte, least-significant group first; the high bit
    # flags "more bytes follow", so only the final byte has it clear.
    chunks = []
    bits = value & 0x7F
    value >>= 7
    while value:
        chunks.append(pycompat.bytechr(0x80 | bits))
        bits = value & 0x7F
        value >>= 7
    chunks.append(pycompat.bytechr(bits))

    return b''.join(chunks)
3285
3287
3286
3288
def uvarintdecodestream(fh):
    """Decode an unsigned variable length integer from a stream.

    The passed argument is anything that has a ``.read(N)`` method.

    >>> try:
    ...     from StringIO import StringIO as BytesIO
    ... except ImportError:
    ...     from io import BytesIO
    >>> uvarintdecodestream(BytesIO(b'\\x00'))
    0
    >>> uvarintdecodestream(BytesIO(b'\\x01'))
    1
    >>> uvarintdecodestream(BytesIO(b'\\x7f'))
    127
    >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
    1337
    >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
    65536
    >>> uvarintdecodestream(BytesIO(b'\\x80'))
    Traceback (most recent call last):
        ...
    Abort: stream ended unexpectedly (got 0 bytes, expected 1)
    """
    result = 0
    # Each byte contributes 7 payload bits; a clear high bit terminates.
    for shift in itertools.count(0, 7):
        byte = ord(readexactly(fh, 1))
        result |= (byte & 0x7F) << shift
        if not byte & 0x80:
            return result
3320
3322
3321 # Passing the '' locale means that the locale should be set according to the
3323 # Passing the '' locale means that the locale should be set according to the
3322 # user settings (environment variables).
3324 # user settings (environment variables).
3323 # Python sometimes avoids setting the global locale settings. When interfacing
3325 # Python sometimes avoids setting the global locale settings. When interfacing
3324 # with C code (e.g. the curses module or the Subversion bindings), the global
3326 # with C code (e.g. the curses module or the Subversion bindings), the global
3325 # locale settings must be initialized correctly. Python 2 does not initialize
3327 # locale settings must be initialized correctly. Python 2 does not initialize
3326 # the global locale settings on interpreter startup. Python 3 sometimes
3328 # the global locale settings on interpreter startup. Python 3 sometimes
3327 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3329 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3328 # explicitly initialize it to get consistent behavior if it's not already
3330 # explicitly initialize it to get consistent behavior if it's not already
3329 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3331 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3330 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3332 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3331 # if we can remove this code.
3333 # if we can remove this code.
@contextlib.contextmanager
def with_lc_ctype():
    """Temporarily initialize LC_CTYPE from the environment if it is 'C'.

    Restores the previous LC_CTYPE on exit; a no-op when the locale was
    already initialized to something other than 'C'.
    """
    oldloc = locale.setlocale(locale.LC_CTYPE, None)
    if oldloc != 'C':
        # Already initialized (e.g. by the interpreter); nothing to do.
        yield
        return
    try:
        try:
            locale.setlocale(locale.LC_CTYPE, '')
        except locale.Error:
            # The likely case is that the locale from the environment
            # variables is unknown.
            pass
        yield
    finally:
        locale.setlocale(locale.LC_CTYPE, oldloc)
3348
3350
3349
3351
def _estimatememory():
    # type: () -> Optional[int]
    """Provide an estimate for the available system memory in Bytes.

    If no estimate can be provided on the platform, returns None.
    """
    if pycompat.sysplatform.startswith(b'win'):
        # On Windows, use the GlobalMemoryStatusEx kernel function directly.
        from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG

        # NOTE(review): Structure/byref/sizeof/windll are defined in ctypes
        # itself; importing them from ctypes.wintypes relies on wintypes'
        # wildcard re-export of ctypes — confirm before changing.
        from ctypes.wintypes import (  # pytype: disable=import-error
            Structure,
            byref,
            sizeof,
            windll,
        )

        class MEMORYSTATUSEX(Structure):
            _fields_ = [
                ('dwLength', DWORD),
                ('dwMemoryLoad', DWORD),
                ('ullTotalPhys', DWORDLONG),
                ('ullAvailPhys', DWORDLONG),
                ('ullTotalPageFile', DWORDLONG),
                ('ullAvailPageFile', DWORDLONG),
                ('ullTotalVirtual', DWORDLONG),
                ('ullAvailVirtual', DWORDLONG),
                ('ullExtendedVirtual', DWORDLONG),
            ]

        x = MEMORYSTATUSEX()
        # dwLength must be set to the structure size before the call.
        x.dwLength = sizeof(x)
        windll.kernel32.GlobalMemoryStatusEx(byref(x))
        return x.ullAvailPhys

    # On newer Unix-like systems and Mac OSX, the sysconf interface
    # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
    # seems to be implemented on most systems.
    try:
        pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
        pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
        return pagesize * pages
    except OSError:  # sysconf can fail
        pass
    except KeyError:  # unknown parameter
        pass
General Comments 0
You need to be logged in to leave comments. Login now