util: fix the signature of observedbufferedinputpipe._fillbuffer()...
Matt Harbison
r50709:1d1b244a default
@@ -1,3315 +1,3315 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16
17 17 import abc
18 18 import collections
19 19 import contextlib
20 20 import errno
21 21 import gc
22 22 import hashlib
23 23 import io
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import pickle # provides util.pickle symbol
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .node import hex
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 45 from hgdemandimport import tracing
46 46 from . import (
47 47 encoding,
48 48 error,
49 49 i18n,
50 50 policy,
51 51 pycompat,
52 52 urllibcompat,
53 53 )
54 54 from .utils import (
55 55 compression,
56 56 hashutil,
57 57 procutil,
58 58 stringutil,
59 59 )
60 60
61 61 if pycompat.TYPE_CHECKING:
62 62 from typing import (
63 63 Iterator,
64 64 List,
65 65 Optional,
66 66 Tuple,
67 67 )
68 68
69 69
70 70 base85 = policy.importmod('base85')
71 71 osutil = policy.importmod('osutil')
72 72
73 73 b85decode = base85.b85decode
74 74 b85encode = base85.b85encode
75 75
76 76 cookielib = pycompat.cookielib
77 77 httplib = pycompat.httplib
78 78 safehasattr = pycompat.safehasattr
79 79 socketserver = pycompat.socketserver
80 80 bytesio = io.BytesIO
81 81 # TODO deprecate stringio name, as it is a lie on Python 3.
82 82 stringio = bytesio
83 83 xmlrpclib = pycompat.xmlrpclib
84 84
85 85 httpserver = urllibcompat.httpserver
86 86 urlerr = urllibcompat.urlerr
87 87 urlreq = urllibcompat.urlreq
88 88
89 89 # workaround for win32mbcs
90 90 _filenamebytestr = pycompat.bytestr
91 91
92 92 if pycompat.iswindows:
93 93 from . import windows as platform
94 94 else:
95 95 from . import posix as platform
96 96
97 97 _ = i18n._
98 98
99 99 abspath = platform.abspath
100 100 bindunixsocket = platform.bindunixsocket
101 101 cachestat = platform.cachestat
102 102 checkexec = platform.checkexec
103 103 checklink = platform.checklink
104 104 copymode = platform.copymode
105 105 expandglobs = platform.expandglobs
106 106 getfsmountpoint = platform.getfsmountpoint
107 107 getfstype = platform.getfstype
108 108 get_password = platform.get_password
109 109 groupmembers = platform.groupmembers
110 110 groupname = platform.groupname
111 111 isexec = platform.isexec
112 112 isowner = platform.isowner
113 113 listdir = osutil.listdir
114 114 localpath = platform.localpath
115 115 lookupreg = platform.lookupreg
116 116 makedir = platform.makedir
117 117 nlinks = platform.nlinks
118 118 normpath = platform.normpath
119 119 normcase = platform.normcase
120 120 normcasespec = platform.normcasespec
121 121 normcasefallback = platform.normcasefallback
122 122 openhardlinks = platform.openhardlinks
123 123 oslink = platform.oslink
124 124 parsepatchoutput = platform.parsepatchoutput
125 125 pconvert = platform.pconvert
126 126 poll = platform.poll
127 127 posixfile = platform.posixfile
128 128 readlink = platform.readlink
129 129 rename = platform.rename
130 130 removedirs = platform.removedirs
131 131 samedevice = platform.samedevice
132 132 samefile = platform.samefile
133 133 samestat = platform.samestat
134 134 setflags = platform.setflags
135 135 split = platform.split
136 136 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
137 137 statisexec = platform.statisexec
138 138 statislink = platform.statislink
139 139 umask = platform.umask
140 140 unlink = platform.unlink
141 141 username = platform.username
142 142
143 143
144 144 def setumask(val):
145 145 # type: (int) -> None
146 146 '''updates the umask. used by chg server'''
147 147 if pycompat.iswindows:
148 148 return
149 149 os.umask(val)
150 150 global umask
151 151 platform.umask = umask = val & 0o777
152 152
153 153
154 154 # small compat layer
155 155 compengines = compression.compengines
156 156 SERVERROLE = compression.SERVERROLE
157 157 CLIENTROLE = compression.CLIENTROLE
158 158
159 159 # Python compatibility
160 160
161 161 _notset = object()
162 162
163 163
164 164 def bitsfrom(container):
165 165 bits = 0
166 166 for bit in container:
167 167 bits |= bit
168 168 return bits
169 169
170 170
171 171 # Python 2.6 still has deprecation warnings enabled by default. We do not want
172 172 # to display anything to standard users, so detect if we are running tests and
173 173 # only use Python deprecation warnings in this case.
174 174 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
175 175 if _dowarn:
176 176 # explicitly unfilter our warning for python 2.7
177 177 #
178 178 # The option of setting PYTHONWARNINGS in the test runner was investigated.
179 179 # However, a module name set through PYTHONWARNINGS is matched exactly, so
180 180 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
181 181 # makes the whole PYTHONWARNINGS approach useless for our use case.
182 182 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
183 183 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
184 184 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
185 185 if _dowarn:
186 186 # silence warning emitted by passing user string to re.sub()
187 187 warnings.filterwarnings(
188 188 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
189 189 )
190 190 warnings.filterwarnings(
191 191 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
192 192 )
193 193 # TODO: reinvent imp.is_frozen()
194 194 warnings.filterwarnings(
195 195 'ignore',
196 196 'the imp module is deprecated',
197 197 DeprecationWarning,
198 198 'mercurial',
199 199 )
200 200
201 201
202 202 def nouideprecwarn(msg, version, stacklevel=1):
203 203 """Issue an python native deprecation warning
204 204
205 205 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
206 206 """
207 207 if _dowarn:
208 208 msg += (
209 209 b"\n(compatibility will be dropped after Mercurial-%s,"
210 210 b" update your code.)"
211 211 ) % version
212 212 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
213 213 # on python 3 with chg, we will need to explicitly flush the output
214 214 sys.stderr.flush()
215 215
216 216
217 217 DIGESTS = {
218 218 b'md5': hashlib.md5,
219 219 b'sha1': hashutil.sha1,
220 220 b'sha512': hashlib.sha512,
221 221 }
222 222 # List of digest types from strongest to weakest
223 223 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
224 224
225 225 for k in DIGESTS_BY_STRENGTH:
226 226 assert k in DIGESTS
227 227
228 228
229 229 class digester:
230 230 """helper to compute digests.
231 231
232 232 This helper can be used to compute one or more digests given their name.
233 233
234 234 >>> d = digester([b'md5', b'sha1'])
235 235 >>> d.update(b'foo')
236 236 >>> [k for k in sorted(d)]
237 237 ['md5', 'sha1']
238 238 >>> d[b'md5']
239 239 'acbd18db4cc2f85cedef654fccc4a4d8'
240 240 >>> d[b'sha1']
241 241 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
242 242 >>> digester.preferred([b'md5', b'sha1'])
243 243 'sha1'
244 244 """
245 245
246 246 def __init__(self, digests, s=b''):
247 247 self._hashes = {}
248 248 for k in digests:
249 249 if k not in DIGESTS:
250 250 raise error.Abort(_(b'unknown digest type: %s') % k)
251 251 self._hashes[k] = DIGESTS[k]()
252 252 if s:
253 253 self.update(s)
254 254
255 255 def update(self, data):
256 256 for h in self._hashes.values():
257 257 h.update(data)
258 258
259 259 def __getitem__(self, key):
260 260 if key not in DIGESTS:
261 261 raise error.Abort(_(b'unknown digest type: %s') % key)
262 262 return hex(self._hashes[key].digest())
263 263
264 264 def __iter__(self):
265 265 return iter(self._hashes)
266 266
267 267 @staticmethod
268 268 def preferred(supported):
269 269 """returns the strongest digest type in both supported and DIGESTS."""
270 270
271 271 for k in DIGESTS_BY_STRENGTH:
272 272 if k in supported:
273 273 return k
274 274 return None
275 275
276 276
277 277 class digestchecker:
278 278 """file handle wrapper that additionally checks content against a given
279 279 size and digests.
280 280
281 281 d = digestchecker(fh, size, {'md5': '...'})
282 282
283 283 When multiple digests are given, all of them are validated.
284 284 """
285 285
286 286 def __init__(self, fh, size, digests):
287 287 self._fh = fh
288 288 self._size = size
289 289 self._got = 0
290 290 self._digests = dict(digests)
291 291 self._digester = digester(self._digests.keys())
292 292
293 293 def read(self, length=-1):
294 294 content = self._fh.read(length)
295 295 self._digester.update(content)
296 296 self._got += len(content)
297 297 return content
298 298
299 299 def validate(self):
300 300 if self._size != self._got:
301 301 raise error.Abort(
302 302 _(b'size mismatch: expected %d, got %d')
303 303 % (self._size, self._got)
304 304 )
305 305 for k, v in self._digests.items():
306 306 if v != self._digester[k]:
307 307 # i18n: first parameter is a digest name
308 308 raise error.Abort(
309 309 _(b'%s mismatch: expected %s, got %s')
310 310 % (k, v, self._digester[k])
311 311 )
312 312
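# A minimal sketch of digestchecker usage (hypothetical values, not part of
# the module API): wrap a file handle, read it fully, then validate that the
# expected size and digests match. The md5 hex value matches the digester
# doctest above.
def _digestchecker_example():
    fh = io.BytesIO(b'foo')
    wanted = {b'md5': b'acbd18db4cc2f85cedef654fccc4a4d8'}
    checker = digestchecker(fh, 3, wanted)
    while checker.read(2):
        pass
    checker.validate()  # raises error.Abort on a size or digest mismatch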
313 313
314 314 try:
315 315 buffer = buffer # pytype: disable=name-error
316 316 except NameError:
317 317
318 318 def buffer(sliceable, offset=0, length=None):
319 319 if length is not None:
320 320 return memoryview(sliceable)[offset : offset + length]
321 321 return memoryview(sliceable)[offset:]
322 322
323 323
324 324 _chunksize = 4096
325 325
326 326
327 327 class bufferedinputpipe:
328 328 """a manually buffered input pipe
329 329
330 330 Python will not let us use buffered IO and lazy reading with 'polling' at
331 331 the same time. We cannot probe the buffer state, and select will not detect
332 332 that data is ready to read if it is already buffered.
333 333
334 334 This class lets us work around that by implementing its own buffering
335 335 (allowing efficient readline) while offering a way to know if the buffer is
336 336 empty from the outside (allowing collaboration of the buffer with polling).
337 337
338 338 This class lives in the 'util' module because it makes use of the 'os'
339 339 module from the python stdlib.
340 340 """
341 341
342 342 def __new__(cls, fh):
343 343 # If we receive a fileobjectproxy, we need to use a variation of this
344 344 # class that notifies observers about activity.
345 345 if isinstance(fh, fileobjectproxy):
346 346 cls = observedbufferedinputpipe
347 347
348 348 return super(bufferedinputpipe, cls).__new__(cls)
349 349
350 350 def __init__(self, input):
351 351 self._input = input
352 352 self._buffer = []
353 353 self._eof = False
354 354 self._lenbuf = 0
355 355
356 356 @property
357 357 def hasbuffer(self):
358 358 """True is any data is currently buffered
359 359
360 360 This will be used externally a pre-step for polling IO. If there is
361 361 already data then no polling should be set in place."""
362 362 return bool(self._buffer)
363 363
364 364 @property
365 365 def closed(self):
366 366 return self._input.closed
367 367
368 368 def fileno(self):
369 369 return self._input.fileno()
370 370
371 371 def close(self):
372 372 return self._input.close()
373 373
374 374 def read(self, size):
375 375 while (not self._eof) and (self._lenbuf < size):
376 376 self._fillbuffer()
377 377 return self._frombuffer(size)
378 378
379 379 def unbufferedread(self, size):
380 380 if not self._eof and self._lenbuf == 0:
381 381 self._fillbuffer(max(size, _chunksize))
382 382 return self._frombuffer(min(self._lenbuf, size))
383 383
384 384 def readline(self, *args, **kwargs):
385 385 if len(self._buffer) > 1:
386 386 # this should not happen because both read and readline end with a
387 387 # _frombuffer call that collapses it.
388 388 self._buffer = [b''.join(self._buffer)]
389 389 self._lenbuf = len(self._buffer[0])
390 390 lfi = -1
391 391 if self._buffer:
392 392 lfi = self._buffer[-1].find(b'\n')
393 393 while (not self._eof) and lfi < 0:
394 394 self._fillbuffer()
395 395 if self._buffer:
396 396 lfi = self._buffer[-1].find(b'\n')
397 397 size = lfi + 1
398 398 if lfi < 0: # end of file
399 399 size = self._lenbuf
400 400 elif len(self._buffer) > 1:
401 401 # we need to take previous chunks into account
402 402 size += self._lenbuf - len(self._buffer[-1])
403 403 return self._frombuffer(size)
404 404
405 405 def _frombuffer(self, size):
406 406 """return at most 'size' data from the buffer
407 407
408 408 The data are removed from the buffer."""
409 409 if size == 0 or not self._buffer:
410 410 return b''
411 411 buf = self._buffer[0]
412 412 if len(self._buffer) > 1:
413 413 buf = b''.join(self._buffer)
414 414
415 415 data = buf[:size]
416 416 buf = buf[len(data) :]
417 417 if buf:
418 418 self._buffer = [buf]
419 419 self._lenbuf = len(buf)
420 420 else:
421 421 self._buffer = []
422 422 self._lenbuf = 0
423 423 return data
424 424
425 425 def _fillbuffer(self, size=_chunksize):
426 426 """read data to the buffer"""
427 427 data = os.read(self._input.fileno(), size)
428 428 if not data:
429 429 self._eof = True
430 430 else:
431 431 self._lenbuf += len(data)
432 432 self._buffer.append(data)
433 433
434 434 return data
435 435
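# A short sketch of the intended usage (hypothetical, POSIX-flavored pipe
# setup): consult hasbuffer before polling so that data already sitting in
# our buffer is not missed by select/poll.
def _bufferedinputpipe_example():
    rfd, wfd = os.pipe()
    os.write(wfd, b'one\ntwo\n')
    os.close(wfd)
    pipe = bufferedinputpipe(os.fdopen(rfd, 'rb'))
    if not pipe.hasbuffer:
        poll([pipe.fileno()])  # safe: no data is hiding in our buffer
    line = pipe.readline()  # b'one\n'
    pipe.close()
    return line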
436 436
437 437 def mmapread(fp, size=None):
438 438 if size == 0:
439 439 # size of 0 to mmap.mmap() means "all data"
440 440 # rather than "zero bytes", so special case that.
441 441 return b''
442 442 elif size is None:
443 443 size = 0
444 444 fd = getattr(fp, 'fileno', lambda: fp)()
445 445 try:
446 446 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
447 447 except ValueError:
448 448 # Empty files cannot be mmapped, but mmapread should still work. Check
449 449 # if the file is empty, and if so, return an empty buffer.
450 450 if os.fstat(fd).st_size == 0:
451 451 return b''
452 452 raise
453 453
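# A small sketch of mmapread (hypothetical path argument): size=None maps
# the whole file, size=0 is special-cased to return b'', and empty files
# also yield b'' instead of raising.
def _mmapread_example(path):
    with open(path, 'rb') as fp:
        data = mmapread(fp)  # maps the whole file
        return data[:16]  # mmap objects slice like bytes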
454 454
455 455 class fileobjectproxy:
456 456 """A proxy around file objects that tells a watcher when events occur.
457 457
458 458 This type is intended to only be used for testing purposes. Think hard
459 459 before using it in important code.
460 460 """
461 461
462 462 __slots__ = (
463 463 '_orig',
464 464 '_observer',
465 465 )
466 466
467 467 def __init__(self, fh, observer):
468 468 object.__setattr__(self, '_orig', fh)
469 469 object.__setattr__(self, '_observer', observer)
470 470
471 471 def __getattribute__(self, name):
472 472 ours = {
473 473 '_observer',
474 474 # IOBase
475 475 'close',
476 476 # closed is a property
477 477 'fileno',
478 478 'flush',
479 479 'isatty',
480 480 'readable',
481 481 'readline',
482 482 'readlines',
483 483 'seek',
484 484 'seekable',
485 485 'tell',
486 486 'truncate',
487 487 'writable',
488 488 'writelines',
489 489 # RawIOBase
490 490 'read',
491 491 'readall',
492 492 'readinto',
493 493 'write',
494 494 # BufferedIOBase
495 495 # raw is a property
496 496 'detach',
497 497 # read defined above
498 498 'read1',
499 499 # readinto defined above
500 500 # write defined above
501 501 }
502 502
503 503 # We only observe some methods.
504 504 if name in ours:
505 505 return object.__getattribute__(self, name)
506 506
507 507 return getattr(object.__getattribute__(self, '_orig'), name)
508 508
509 509 def __nonzero__(self):
510 510 return bool(object.__getattribute__(self, '_orig'))
511 511
512 512 __bool__ = __nonzero__
513 513
514 514 def __delattr__(self, name):
515 515 return delattr(object.__getattribute__(self, '_orig'), name)
516 516
517 517 def __setattr__(self, name, value):
518 518 return setattr(object.__getattribute__(self, '_orig'), name, value)
519 519
520 520 def __iter__(self):
521 521 return object.__getattribute__(self, '_orig').__iter__()
522 522
523 523 def _observedcall(self, name, *args, **kwargs):
524 524 # Call the original object.
525 525 orig = object.__getattribute__(self, '_orig')
526 526 res = getattr(orig, name)(*args, **kwargs)
527 527
528 528 # Call a method on the observer of the same name with arguments
529 529 # so it can react, log, etc.
530 530 observer = object.__getattribute__(self, '_observer')
531 531 fn = getattr(observer, name, None)
532 532 if fn:
533 533 fn(res, *args, **kwargs)
534 534
535 535 return res
536 536
537 537 def close(self, *args, **kwargs):
538 538 return object.__getattribute__(self, '_observedcall')(
539 539 'close', *args, **kwargs
540 540 )
541 541
542 542 def fileno(self, *args, **kwargs):
543 543 return object.__getattribute__(self, '_observedcall')(
544 544 'fileno', *args, **kwargs
545 545 )
546 546
547 547 def flush(self, *args, **kwargs):
548 548 return object.__getattribute__(self, '_observedcall')(
549 549 'flush', *args, **kwargs
550 550 )
551 551
552 552 def isatty(self, *args, **kwargs):
553 553 return object.__getattribute__(self, '_observedcall')(
554 554 'isatty', *args, **kwargs
555 555 )
556 556
557 557 def readable(self, *args, **kwargs):
558 558 return object.__getattribute__(self, '_observedcall')(
559 559 'readable', *args, **kwargs
560 560 )
561 561
562 562 def readline(self, *args, **kwargs):
563 563 return object.__getattribute__(self, '_observedcall')(
564 564 'readline', *args, **kwargs
565 565 )
566 566
567 567 def readlines(self, *args, **kwargs):
568 568 return object.__getattribute__(self, '_observedcall')(
569 569 'readlines', *args, **kwargs
570 570 )
571 571
572 572 def seek(self, *args, **kwargs):
573 573 return object.__getattribute__(self, '_observedcall')(
574 574 'seek', *args, **kwargs
575 575 )
576 576
577 577 def seekable(self, *args, **kwargs):
578 578 return object.__getattribute__(self, '_observedcall')(
579 579 'seekable', *args, **kwargs
580 580 )
581 581
582 582 def tell(self, *args, **kwargs):
583 583 return object.__getattribute__(self, '_observedcall')(
584 584 'tell', *args, **kwargs
585 585 )
586 586
587 587 def truncate(self, *args, **kwargs):
588 588 return object.__getattribute__(self, '_observedcall')(
589 589 'truncate', *args, **kwargs
590 590 )
591 591
592 592 def writable(self, *args, **kwargs):
593 593 return object.__getattribute__(self, '_observedcall')(
594 594 'writable', *args, **kwargs
595 595 )
596 596
597 597 def writelines(self, *args, **kwargs):
598 598 return object.__getattribute__(self, '_observedcall')(
599 599 'writelines', *args, **kwargs
600 600 )
601 601
602 602 def read(self, *args, **kwargs):
603 603 return object.__getattribute__(self, '_observedcall')(
604 604 'read', *args, **kwargs
605 605 )
606 606
607 607 def readall(self, *args, **kwargs):
608 608 return object.__getattribute__(self, '_observedcall')(
609 609 'readall', *args, **kwargs
610 610 )
611 611
612 612 def readinto(self, *args, **kwargs):
613 613 return object.__getattribute__(self, '_observedcall')(
614 614 'readinto', *args, **kwargs
615 615 )
616 616
617 617 def write(self, *args, **kwargs):
618 618 return object.__getattribute__(self, '_observedcall')(
619 619 'write', *args, **kwargs
620 620 )
621 621
622 622 def detach(self, *args, **kwargs):
623 623 return object.__getattribute__(self, '_observedcall')(
624 624 'detach', *args, **kwargs
625 625 )
626 626
627 627 def read1(self, *args, **kwargs):
628 628 return object.__getattribute__(self, '_observedcall')(
629 629 'read1', *args, **kwargs
630 630 )
631 631
632 632
633 633 class observedbufferedinputpipe(bufferedinputpipe):
634 634 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
635 635
636 636 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
637 637 bypass ``fileobjectproxy``. Because of this, we need to make
638 638 ``bufferedinputpipe`` aware of these operations.
639 639
640 640 This variation of ``bufferedinputpipe`` can notify observers about
641 641 ``os.read()`` events. It also re-publishes other events, such as
642 642 ``read()`` and ``readline()``.
643 643 """
644 644
645 - def _fillbuffer(self):
646 - res = super(observedbufferedinputpipe, self)._fillbuffer()
645 + def _fillbuffer(self, size=_chunksize):
646 + res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
647 647
648 648 fn = getattr(self._input._observer, 'osread', None)
649 649 if fn:
650 - fn(res, _chunksize)
650 + fn(res, size)
651 651
652 652 return res
653 653
654 654 # We use different observer methods because the operation isn't
655 655 # performed on the actual file object but on us.
656 656 def read(self, size):
657 657 res = super(observedbufferedinputpipe, self).read(size)
658 658
659 659 fn = getattr(self._input._observer, 'bufferedread', None)
660 660 if fn:
661 661 fn(res, size)
662 662
663 663 return res
664 664
665 665 def readline(self, *args, **kwargs):
666 666 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
667 667
668 668 fn = getattr(self._input._observer, 'bufferedreadline', None)
669 669 if fn:
670 670 fn(res)
671 671
672 672 return res
673 673
674 674
675 675 PROXIED_SOCKET_METHODS = {
676 676 'makefile',
677 677 'recv',
678 678 'recvfrom',
679 679 'recvfrom_into',
680 680 'recv_into',
681 681 'send',
682 682 'sendall',
683 683 'sendto',
684 684 'setblocking',
685 685 'settimeout',
686 686 'gettimeout',
687 687 'setsockopt',
688 688 }
689 689
690 690
691 691 class socketproxy:
692 692 """A proxy around a socket that tells a watcher when events occur.
693 693
694 694 This is like ``fileobjectproxy`` except for sockets.
695 695
696 696 This type is intended to only be used for testing purposes. Think hard
697 697 before using it in important code.
698 698 """
699 699
700 700 __slots__ = (
701 701 '_orig',
702 702 '_observer',
703 703 )
704 704
705 705 def __init__(self, sock, observer):
706 706 object.__setattr__(self, '_orig', sock)
707 707 object.__setattr__(self, '_observer', observer)
708 708
709 709 def __getattribute__(self, name):
710 710 if name in PROXIED_SOCKET_METHODS:
711 711 return object.__getattribute__(self, name)
712 712
713 713 return getattr(object.__getattribute__(self, '_orig'), name)
714 714
715 715 def __delattr__(self, name):
716 716 return delattr(object.__getattribute__(self, '_orig'), name)
717 717
718 718 def __setattr__(self, name, value):
719 719 return setattr(object.__getattribute__(self, '_orig'), name, value)
720 720
721 721 def __nonzero__(self):
722 722 return bool(object.__getattribute__(self, '_orig'))
723 723
724 724 __bool__ = __nonzero__
725 725
726 726 def _observedcall(self, name, *args, **kwargs):
727 727 # Call the original object.
728 728 orig = object.__getattribute__(self, '_orig')
729 729 res = getattr(orig, name)(*args, **kwargs)
730 730
731 731 # Call a method on the observer of the same name with arguments
732 732 # so it can react, log, etc.
733 733 observer = object.__getattribute__(self, '_observer')
734 734 fn = getattr(observer, name, None)
735 735 if fn:
736 736 fn(res, *args, **kwargs)
737 737
738 738 return res
739 739
740 740 def makefile(self, *args, **kwargs):
741 741 res = object.__getattribute__(self, '_observedcall')(
742 742 'makefile', *args, **kwargs
743 743 )
744 744
745 745 # The file object may be used for I/O. So we turn it into a
746 746 # proxy using our observer.
747 747 observer = object.__getattribute__(self, '_observer')
748 748 return makeloggingfileobject(
749 749 observer.fh,
750 750 res,
751 751 observer.name,
752 752 reads=observer.reads,
753 753 writes=observer.writes,
754 754 logdata=observer.logdata,
755 755 logdataapis=observer.logdataapis,
756 756 )
757 757
758 758 def recv(self, *args, **kwargs):
759 759 return object.__getattribute__(self, '_observedcall')(
760 760 'recv', *args, **kwargs
761 761 )
762 762
763 763 def recvfrom(self, *args, **kwargs):
764 764 return object.__getattribute__(self, '_observedcall')(
765 765 'recvfrom', *args, **kwargs
766 766 )
767 767
768 768 def recvfrom_into(self, *args, **kwargs):
769 769 return object.__getattribute__(self, '_observedcall')(
770 770 'recvfrom_into', *args, **kwargs
771 771 )
772 772
773 773 def recv_into(self, *args, **kwargs):
774 774 return object.__getattribute__(self, '_observedcall')(
775 775 'recv_into', *args, **kwargs
776 776 )
777 777
778 778 def send(self, *args, **kwargs):
779 779 return object.__getattribute__(self, '_observedcall')(
780 780 'send', *args, **kwargs
781 781 )
782 782
783 783 def sendall(self, *args, **kwargs):
784 784 return object.__getattribute__(self, '_observedcall')(
785 785 'sendall', *args, **kwargs
786 786 )
787 787
788 788 def sendto(self, *args, **kwargs):
789 789 return object.__getattribute__(self, '_observedcall')(
790 790 'sendto', *args, **kwargs
791 791 )
792 792
793 793 def setblocking(self, *args, **kwargs):
794 794 return object.__getattribute__(self, '_observedcall')(
795 795 'setblocking', *args, **kwargs
796 796 )
797 797
798 798 def settimeout(self, *args, **kwargs):
799 799 return object.__getattribute__(self, '_observedcall')(
800 800 'settimeout', *args, **kwargs
801 801 )
802 802
803 803 def gettimeout(self, *args, **kwargs):
804 804 return object.__getattribute__(self, '_observedcall')(
805 805 'gettimeout', *args, **kwargs
806 806 )
807 807
808 808 def setsockopt(self, *args, **kwargs):
809 809 return object.__getattribute__(self, '_observedcall')(
810 810 'setsockopt', *args, **kwargs
811 811 )
812 812
813 813
814 814 class baseproxyobserver:
815 815 def __init__(self, fh, name, logdata, logdataapis):
816 816 self.fh = fh
817 817 self.name = name
818 818 self.logdata = logdata
819 819 self.logdataapis = logdataapis
820 820
821 821 def _writedata(self, data):
822 822 if not self.logdata:
823 823 if self.logdataapis:
824 824 self.fh.write(b'\n')
825 825 self.fh.flush()
826 826 return
827 827
828 828 # Simple case writes all data on a single line.
829 829 if b'\n' not in data:
830 830 if self.logdataapis:
831 831 self.fh.write(b': %s\n' % stringutil.escapestr(data))
832 832 else:
833 833 self.fh.write(
834 834 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
835 835 )
836 836 self.fh.flush()
837 837 return
838 838
839 839 # Data with newlines is written to multiple lines.
840 840 if self.logdataapis:
841 841 self.fh.write(b':\n')
842 842
843 843 lines = data.splitlines(True)
844 844 for line in lines:
845 845 self.fh.write(
846 846 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
847 847 )
848 848 self.fh.flush()
849 849
850 850
851 851 class fileobjectobserver(baseproxyobserver):
852 852 """Logs file object activity."""
853 853
854 854 def __init__(
855 855 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
856 856 ):
857 857 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
858 858 self.reads = reads
859 859 self.writes = writes
860 860
861 861 def read(self, res, size=-1):
862 862 if not self.reads:
863 863 return
864 864 # Python 3 can return None from reads at EOF instead of empty strings.
865 865 if res is None:
866 866 res = b''
867 867
868 868 if size == -1 and res == b'':
869 869 # Suppress pointless read(-1) calls that return
870 870 # nothing. These happen _a lot_ on Python 3, and there
871 871 # doesn't seem to be a better workaround to have matching
872 872 # Python 2 and 3 behavior. :(
873 873 return
874 874
875 875 if self.logdataapis:
876 876 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
877 877
878 878 self._writedata(res)
879 879
880 880 def readline(self, res, limit=-1):
881 881 if not self.reads:
882 882 return
883 883
884 884 if self.logdataapis:
885 885 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
886 886
887 887 self._writedata(res)
888 888
889 889 def readinto(self, res, dest):
890 890 if not self.reads:
891 891 return
892 892
893 893 if self.logdataapis:
894 894 self.fh.write(
895 895 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
896 896 )
897 897
898 898 data = dest[0:res] if res is not None else b''
899 899
900 900 # _writedata() uses "in" operator and is confused by memoryview because
901 901 # characters are ints on Python 3.
902 902 if isinstance(data, memoryview):
903 903 data = data.tobytes()
904 904
905 905 self._writedata(data)
906 906
907 907 def write(self, res, data):
908 908 if not self.writes:
909 909 return
910 910
911 911 # Python 2 returns None from some write() calls. Python 3 (reasonably)
912 912 # returns the integer bytes written.
913 913 if res is None and data:
914 914 res = len(data)
915 915
916 916 if self.logdataapis:
917 917 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
918 918
919 919 self._writedata(data)
920 920
921 921 def flush(self, res):
922 922 if not self.writes:
923 923 return
924 924
925 925 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
926 926
927 927 # For observedbufferedinputpipe.
928 928 def bufferedread(self, res, size):
929 929 if not self.reads:
930 930 return
931 931
932 932 if self.logdataapis:
933 933 self.fh.write(
934 934 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
935 935 )
936 936
937 937 self._writedata(res)
938 938
939 939 def bufferedreadline(self, res):
940 940 if not self.reads:
941 941 return
942 942
943 943 if self.logdataapis:
944 944 self.fh.write(
945 945 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
946 946 )
947 947
948 948 self._writedata(res)
949 949
950 950
951 951 def makeloggingfileobject(
952 952 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
953 953 ):
954 954 """Turn a file object into a logging file object."""
955 955
956 956 observer = fileobjectobserver(
957 957 logh,
958 958 name,
959 959 reads=reads,
960 960 writes=writes,
961 961 logdata=logdata,
962 962 logdataapis=logdataapis,
963 963 )
964 964 return fileobjectproxy(fh, observer)
965 965
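# A minimal sketch of makeloggingfileobject (hypothetical handles): every
# observed call on the proxy is mirrored to the log file handle.
def _loggingfileobject_example():
    logh = io.BytesIO()
    fh = makeloggingfileobject(logh, io.BytesIO(b'data'), b'src', logdata=True)
    fh.read(4)
    return logh.getvalue()  # b'src> read(4) -> 4: data\n'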
966 966
967 967 class socketobserver(baseproxyobserver):
968 968 """Logs socket activity."""
969 969
970 970 def __init__(
971 971 self,
972 972 fh,
973 973 name,
974 974 reads=True,
975 975 writes=True,
976 976 states=True,
977 977 logdata=False,
978 978 logdataapis=True,
979 979 ):
980 980 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
981 981 self.reads = reads
982 982 self.writes = writes
983 983 self.states = states
984 984
985 985 def makefile(self, res, mode=None, bufsize=None):
986 986 if not self.states:
987 987 return
988 988
989 989 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
990 990
991 991 def recv(self, res, size, flags=0):
992 992 if not self.reads:
993 993 return
994 994
995 995 if self.logdataapis:
996 996 self.fh.write(
997 997 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
998 998 )
999 999 self._writedata(res)
1000 1000
1001 1001 def recvfrom(self, res, size, flags=0):
1002 1002 if not self.reads:
1003 1003 return
1004 1004
1005 1005 if self.logdataapis:
1006 1006 self.fh.write(
1007 1007 b'%s> recvfrom(%d, %d) -> %d'
1008 1008 % (self.name, size, flags, len(res[0]))
1009 1009 )
1010 1010
1011 1011 self._writedata(res[0])
1012 1012
1013 1013 def recvfrom_into(self, res, buf, size, flags=0):
1014 1014 if not self.reads:
1015 1015 return
1016 1016
1017 1017 if self.logdataapis:
1018 1018 self.fh.write(
1019 1019 b'%s> recvfrom_into(%d, %d) -> %d'
1020 1020 % (self.name, size, flags, res[0])
1021 1021 )
1022 1022
1023 1023 self._writedata(buf[0 : res[0]])
1024 1024
1025 1025 def recv_into(self, res, buf, size=0, flags=0):
1026 1026 if not self.reads:
1027 1027 return
1028 1028
1029 1029 if self.logdataapis:
1030 1030 self.fh.write(
1031 1031 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1032 1032 )
1033 1033
1034 1034 self._writedata(buf[0:res])
1035 1035
1036 1036 def send(self, res, data, flags=0):
1037 1037 if not self.writes:
1038 1038 return
1039 1039
1040 1040 self.fh.write(
1041 1041 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1042 1042 )
1043 1043 self._writedata(data)
1044 1044
1045 1045 def sendall(self, res, data, flags=0):
1046 1046 if not self.writes:
1047 1047 return
1048 1048
1049 1049 if self.logdataapis:
1050 1050 # Returns None on success. So don't bother reporting return value.
1051 1051 self.fh.write(
1052 1052 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1053 1053 )
1054 1054
1055 1055 self._writedata(data)
1056 1056
1057 1057 def sendto(self, res, data, flagsoraddress, address=None):
1058 1058 if not self.writes:
1059 1059 return
1060 1060
1061 1061 if address:
1062 1062 flags = flagsoraddress
1063 1063 else:
1064 1064 flags = 0
1065 1065
1066 1066 if self.logdataapis:
1067 1067 self.fh.write(
1068 1068 b'%s> sendto(%d, %d, %r) -> %d'
1069 1069 % (self.name, len(data), flags, address, res)
1070 1070 )
1071 1071
1072 1072 self._writedata(data)
1073 1073
1074 1074 def setblocking(self, res, flag):
1075 1075 if not self.states:
1076 1076 return
1077 1077
1078 1078 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1079 1079
1080 1080 def settimeout(self, res, value):
1081 1081 if not self.states:
1082 1082 return
1083 1083
1084 1084 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1085 1085
1086 1086 def gettimeout(self, res):
1087 1087 if not self.states:
1088 1088 return
1089 1089
1090 1090 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1091 1091
1092 1092 def setsockopt(self, res, level, optname, value):
1093 1093 if not self.states:
1094 1094 return
1095 1095
1096 1096 self.fh.write(
1097 1097 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1098 1098 % (self.name, level, optname, value, res)
1099 1099 )
1100 1100
1101 1101
1102 1102 def makeloggingsocket(
1103 1103 logh,
1104 1104 fh,
1105 1105 name,
1106 1106 reads=True,
1107 1107 writes=True,
1108 1108 states=True,
1109 1109 logdata=False,
1110 1110 logdataapis=True,
1111 1111 ):
1112 1112 """Turn a socket into a logging socket."""
1113 1113
1114 1114 observer = socketobserver(
1115 1115 logh,
1116 1116 name,
1117 1117 reads=reads,
1118 1118 writes=writes,
1119 1119 states=states,
1120 1120 logdata=logdata,
1121 1121 logdataapis=logdataapis,
1122 1122 )
1123 1123 return socketproxy(fh, observer)
1124 1124
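# A matching sketch for sockets (hypothetical socketpair, POSIX-flavored):
# observed calls such as recv() are logged in the same format as the file
# object proxy above.
def _loggingsocket_example():
    import socket

    s1, s2 = socket.socketpair()
    logh = io.BytesIO()
    proxy = makeloggingsocket(logh, s1, b'sock', logdata=True)
    s2.sendall(b'ping')
    proxy.recv(4)
    s1.close()
    s2.close()
    return logh.getvalue()  # b'sock> recv(4, 0) -> 4: ping\n'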
1125 1125
1126 1126 def version():
1127 1127 """Return version information if available."""
1128 1128 try:
1129 1129 from . import __version__
1130 1130
1131 1131 return __version__.version
1132 1132 except ImportError:
1133 1133 return b'unknown'
1134 1134
1135 1135
1136 1136 def versiontuple(v=None, n=4):
1137 1137 """Parses a Mercurial version string into an N-tuple.
1138 1138
1139 1139 The version string to be parsed is specified with the ``v`` argument.
1140 1140 If it isn't defined, the current Mercurial version string will be parsed.
1141 1141
1142 1142 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1143 1143 returned values:
1144 1144
1145 1145 >>> v = b'3.6.1+190-df9b73d2d444'
1146 1146 >>> versiontuple(v, 2)
1147 1147 (3, 6)
1148 1148 >>> versiontuple(v, 3)
1149 1149 (3, 6, 1)
1150 1150 >>> versiontuple(v, 4)
1151 1151 (3, 6, 1, '190-df9b73d2d444')
1152 1152
1153 1153 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1154 1154 (3, 6, 1, '190-df9b73d2d444+20151118')
1155 1155
1156 1156 >>> v = b'3.6'
1157 1157 >>> versiontuple(v, 2)
1158 1158 (3, 6)
1159 1159 >>> versiontuple(v, 3)
1160 1160 (3, 6, None)
1161 1161 >>> versiontuple(v, 4)
1162 1162 (3, 6, None, None)
1163 1163
1164 1164 >>> v = b'3.9-rc'
1165 1165 >>> versiontuple(v, 2)
1166 1166 (3, 9)
1167 1167 >>> versiontuple(v, 3)
1168 1168 (3, 9, None)
1169 1169 >>> versiontuple(v, 4)
1170 1170 (3, 9, None, 'rc')
1171 1171
1172 1172 >>> v = b'3.9-rc+2-02a8fea4289b'
1173 1173 >>> versiontuple(v, 2)
1174 1174 (3, 9)
1175 1175 >>> versiontuple(v, 3)
1176 1176 (3, 9, None)
1177 1177 >>> versiontuple(v, 4)
1178 1178 (3, 9, None, 'rc+2-02a8fea4289b')
1179 1179
1180 1180 >>> versiontuple(b'4.6rc0')
1181 1181 (4, 6, None, 'rc0')
1182 1182 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1183 1183 (4, 6, None, 'rc0+12-425d55e54f98')
1184 1184 >>> versiontuple(b'.1.2.3')
1185 1185 (None, None, None, '.1.2.3')
1186 1186 >>> versiontuple(b'12.34..5')
1187 1187 (12, 34, None, '..5')
1188 1188 >>> versiontuple(b'1.2.3.4.5.6')
1189 1189 (1, 2, 3, '.4.5.6')
1190 1190 """
1191 1191 if not v:
1192 1192 v = version()
1193 1193 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1194 1194 if not m:
1195 1195 vparts, extra = b'', v
1196 1196 elif m.group(2):
1197 1197 vparts, extra = m.groups()
1198 1198 else:
1199 1199 vparts, extra = m.group(1), None
1200 1200
1201 1201 assert vparts is not None # help pytype
1202 1202
1203 1203 vints = []
1204 1204 for i in vparts.split(b'.'):
1205 1205 try:
1206 1206 vints.append(int(i))
1207 1207 except ValueError:
1208 1208 break
1209 1209 # (3, 6) -> (3, 6, None)
1210 1210 while len(vints) < 3:
1211 1211 vints.append(None)
1212 1212
1213 1213 if n == 2:
1214 1214 return (vints[0], vints[1])
1215 1215 if n == 3:
1216 1216 return (vints[0], vints[1], vints[2])
1217 1217 if n == 4:
1218 1218 return (vints[0], vints[1], vints[2], extra)
1219 1219
1220 1220 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1221 1221
1222 1222
1223 1223 def cachefunc(func):
1224 1224 '''cache the result of function calls'''
1225 1225 # XXX doesn't handle keyword args
1226 1226 if func.__code__.co_argcount == 0:
1227 1227 listcache = []
1228 1228
1229 1229 def f():
1230 1230 if len(listcache) == 0:
1231 1231 listcache.append(func())
1232 1232 return listcache[0]
1233 1233
1234 1234 return f
1235 1235 cache = {}
1236 1236 if func.__code__.co_argcount == 1:
1237 1237 # we gain a small amount of time because
1238 1238 # we don't need to pack/unpack the list
1239 1239 def f(arg):
1240 1240 if arg not in cache:
1241 1241 cache[arg] = func(arg)
1242 1242 return cache[arg]
1243 1243
1244 1244 else:
1245 1245
1246 1246 def f(*args):
1247 1247 if args not in cache:
1248 1248 cache[args] = func(*args)
1249 1249 return cache[args]
1250 1250
1251 1251 return f
1252 1252
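# A tiny sketch of cachefunc (hypothetical function): each distinct argument
# is computed once; later calls replay the cached result.
def _cachefunc_example():
    calls = []

    @cachefunc
    def square(x):
        calls.append(x)
        return x * x

    square(3), square(3)
    return calls  # [3]: the second call came from the cache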
1253 1253
1254 1254 class cow:
1255 1255 """helper class to make copy-on-write easier
1256 1256
1257 1257 Call preparewrite before doing any writes.
1258 1258 """
1259 1259
1260 1260 def preparewrite(self):
1261 1261 """call this before writes, return self or a copied new object"""
1262 1262 if getattr(self, '_copied', 0):
1263 1263 self._copied -= 1
1264 1264 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1265 1265 return self.__class__(self) # pytype: disable=wrong-arg-count
1266 1266 return self
1267 1267
1268 1268 def copy(self):
1269 1269 """always do a cheap copy"""
1270 1270 self._copied = getattr(self, '_copied', 0) + 1
1271 1271 return self
1272 1272
1273 1273
1274 1274 class sortdict(collections.OrderedDict):
1275 1275 """a simple sorted dictionary
1276 1276
1277 1277 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1278 1278 >>> d2 = d1.copy()
1279 1279 >>> d2
1280 1280 sortdict([('a', 0), ('b', 1)])
1281 1281 >>> d2.update([(b'a', 2)])
1282 1282 >>> list(d2.keys()) # should still be in last-set order
1283 1283 ['b', 'a']
1284 1284 >>> d1.insert(1, b'a.5', 0.5)
1285 1285 >>> d1
1286 1286 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1287 1287 """
1288 1288
1289 1289 def __setitem__(self, key, value):
1290 1290 if key in self:
1291 1291 del self[key]
1292 1292 super(sortdict, self).__setitem__(key, value)
1293 1293
1294 1294 if pycompat.ispypy:
1295 1295 # __setitem__() isn't called as of PyPy 5.8.0
1296 1296 def update(self, src, **f):
1297 1297 if isinstance(src, dict):
1298 1298 src = src.items()
1299 1299 for k, v in src:
1300 1300 self[k] = v
1301 1301 for k in f:
1302 1302 self[k] = f[k]
1303 1303
1304 1304 def insert(self, position, key, value):
1305 1305 for (i, (k, v)) in enumerate(list(self.items())):
1306 1306 if i == position:
1307 1307 self[key] = value
1308 1308 if i >= position:
1309 1309 del self[k]
1310 1310 self[k] = v
1311 1311
1312 1312
1313 1313 class cowdict(cow, dict):
1314 1314 """copy-on-write dict
1315 1315
1316 1316 Be sure to call d = d.preparewrite() before writing to d.
1317 1317
1318 1318 >>> a = cowdict()
1319 1319 >>> a is a.preparewrite()
1320 1320 True
1321 1321 >>> b = a.copy()
1322 1322 >>> b is a
1323 1323 True
1324 1324 >>> c = b.copy()
1325 1325 >>> c is a
1326 1326 True
1327 1327 >>> a = a.preparewrite()
1328 1328 >>> b is a
1329 1329 False
1330 1330 >>> a is a.preparewrite()
1331 1331 True
1332 1332 >>> c = c.preparewrite()
1333 1333 >>> b is c
1334 1334 False
1335 1335 >>> b is b.preparewrite()
1336 1336 True
1337 1337 """
1338 1338
1339 1339
1340 1340 class cowsortdict(cow, sortdict):
1341 1341 """copy-on-write sortdict
1342 1342
1343 1343 Be sure to call d = d.preparewrite() before writing to d.
1344 1344 """
1345 1345
1346 1346
1347 1347 class transactional: # pytype: disable=ignored-metaclass
1348 1348 """Base class for making a transactional type into a context manager."""
1349 1349
1350 1350 __metaclass__ = abc.ABCMeta
1351 1351
1352 1352 @abc.abstractmethod
1353 1353 def close(self):
1354 1354 """Successfully closes the transaction."""
1355 1355
1356 1356 @abc.abstractmethod
1357 1357 def release(self):
1358 1358 """Marks the end of the transaction.
1359 1359
1360 1360 If the transaction has not been closed, it will be aborted.
1361 1361 """
1362 1362
1363 1363 def __enter__(self):
1364 1364 return self
1365 1365
1366 1366 def __exit__(self, exc_type, exc_val, exc_tb):
1367 1367 try:
1368 1368 if exc_type is None:
1369 1369 self.close()
1370 1370 finally:
1371 1371 self.release()
1372 1372
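# A minimal transactional subclass (hypothetical type, for illustration):
# __exit__ calls close() only when no exception escaped the body, and always
# calls release().
class _listtransaction(transactional):
    def __init__(self):
        self.events = []

    def close(self):
        self.events.append(b'close')

    def release(self):
        self.events.append(b'release')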
1373 1373
1374 1374 @contextlib.contextmanager
1375 1375 def acceptintervention(tr=None):
1376 1376 """A context manager that closes the transaction on InterventionRequired
1377 1377
1378 1378 If no transaction was provided, this simply runs the body and returns
1379 1379 """
1380 1380 if not tr:
1381 1381 yield
1382 1382 return
1383 1383 try:
1384 1384 yield
1385 1385 tr.close()
1386 1386 except error.InterventionRequired:
1387 1387 tr.close()
1388 1388 raise
1389 1389 finally:
1390 1390 tr.release()
1391 1391
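# acceptintervention with the hypothetical transaction above: an
# InterventionRequired escaping the body still closes the transaction before
# it is released and the exception re-raised.
def _acceptintervention_example():
    tr = _listtransaction()
    try:
        with acceptintervention(tr):
            raise error.InterventionRequired(b'resolve conflicts')
    except error.InterventionRequired:
        pass
    return tr.events  # [b'close', b'release']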
1392 1392
1393 1393 @contextlib.contextmanager
1394 1394 def nullcontextmanager(enter_result=None):
1395 1395 yield enter_result
1396 1396
1397 1397
1398 1398 class _lrucachenode:
1399 1399 """A node in a doubly linked list.
1400 1400
1401 1401 Holds a reference to nodes on either side as well as a key-value
1402 1402 pair for the dictionary entry.
1403 1403 """
1404 1404
1405 1405 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1406 1406
1407 1407 def __init__(self):
1408 1408 self.next = self
1409 1409 self.prev = self
1410 1410
1411 1411 self.key = _notset
1412 1412 self.value = None
1413 1413 self.cost = 0
1414 1414
1415 1415 def markempty(self):
1416 1416 """Mark the node as emptied."""
1417 1417 self.key = _notset
1418 1418 self.value = None
1419 1419 self.cost = 0
1420 1420
1421 1421
1422 1422 class lrucachedict:
1423 1423 """Dict that caches most recent accesses and sets.
1424 1424
1425 1425 The dict consists of an actual backing dict - indexed by original
1426 1426 key - and a doubly linked circular list defining the order of entries in
1427 1427 the cache.
1428 1428
1429 1429 The head node is the newest entry in the cache. If the cache is full,
1430 1430 we recycle head.prev and make it the new head. Cache accesses result in
1431 1431 the node being moved to before the existing head and being marked as the
1432 1432 new head node.
1433 1433
1434 1434 Items in the cache can be inserted with an optional "cost" value. This is
1435 1435 simply an integer that is specified by the caller. The cache can be queried
1436 1436 for the total cost of all items presently in the cache.
1437 1437
1438 1438 The cache can also define a maximum cost. If a cache insertion would
1439 1439 cause the total cost of the cache to go beyond the maximum cost limit,
1440 1440 nodes will be evicted to make room for the new node. This can be used
1441 1441 to e.g. set a max memory limit and associate an estimated bytes size
1442 1442 cost to each item in the cache. By default, no maximum cost is enforced.
1443 1443 """
1444 1444
1445 1445 def __init__(self, max, maxcost=0):
1446 1446 self._cache = {}
1447 1447
1448 1448 self._head = _lrucachenode()
1449 1449 self._size = 1
1450 1450 self.capacity = max
1451 1451 self.totalcost = 0
1452 1452 self.maxcost = maxcost
1453 1453
1454 1454 def __len__(self):
1455 1455 return len(self._cache)
1456 1456
1457 1457 def __contains__(self, k):
1458 1458 return k in self._cache
1459 1459
1460 1460 def __iter__(self):
1461 1461 # We don't have to iterate in cache order, but why not.
1462 1462 n = self._head
1463 1463 for i in range(len(self._cache)):
1464 1464 yield n.key
1465 1465 n = n.next
1466 1466
1467 1467 def __getitem__(self, k):
1468 1468 node = self._cache[k]
1469 1469 self._movetohead(node)
1470 1470 return node.value
1471 1471
1472 1472 def insert(self, k, v, cost=0):
1473 1473 """Insert a new item in the cache with optional cost value."""
1474 1474 node = self._cache.get(k)
1475 1475 # Replace existing value and mark as newest.
1476 1476 if node is not None:
1477 1477 self.totalcost -= node.cost
1478 1478 node.value = v
1479 1479 node.cost = cost
1480 1480 self.totalcost += cost
1481 1481 self._movetohead(node)
1482 1482
1483 1483 if self.maxcost:
1484 1484 self._enforcecostlimit()
1485 1485
1486 1486 return
1487 1487
1488 1488 if self._size < self.capacity:
1489 1489 node = self._addcapacity()
1490 1490 else:
1491 1491 # Grab the last/oldest item.
1492 1492 node = self._head.prev
1493 1493
1494 1494 # At capacity. Kill the old entry.
1495 1495 if node.key is not _notset:
1496 1496 self.totalcost -= node.cost
1497 1497 del self._cache[node.key]
1498 1498
1499 1499 node.key = k
1500 1500 node.value = v
1501 1501 node.cost = cost
1502 1502 self.totalcost += cost
1503 1503 self._cache[k] = node
1504 1504 # And mark it as newest entry. No need to adjust order since it
1505 1505 # is already self._head.prev.
1506 1506 self._head = node
1507 1507
1508 1508 if self.maxcost:
1509 1509 self._enforcecostlimit()
1510 1510
1511 1511 def __setitem__(self, k, v):
1512 1512 self.insert(k, v)
1513 1513
1514 1514 def __delitem__(self, k):
1515 1515 self.pop(k)
1516 1516
1517 1517 def pop(self, k, default=_notset):
1518 1518 try:
1519 1519 node = self._cache.pop(k)
1520 1520 except KeyError:
1521 1521 if default is _notset:
1522 1522 raise
1523 1523 return default
1524 1524
1525 1525 assert node is not None # help pytype
1526 1526 value = node.value
1527 1527 self.totalcost -= node.cost
1528 1528 node.markempty()
1529 1529
1530 1530 # Temporarily mark as newest item before re-adjusting head to make
1531 1531 # this node the oldest item.
1532 1532 self._movetohead(node)
1533 1533 self._head = node.next
1534 1534
1535 1535 return value
1536 1536
1537 1537 # Additional dict methods.
1538 1538
1539 1539 def get(self, k, default=None):
1540 1540 try:
1541 1541 return self.__getitem__(k)
1542 1542 except KeyError:
1543 1543 return default
1544 1544
1545 1545 def peek(self, k, default=_notset):
1546 1546 """Get the specified item without moving it to the head
1547 1547
1548 1548 Unlike get(), this doesn't mutate the internal state. But be aware
1549 1549 that this doesn't make peek() thread safe.
1550 1550 """
1551 1551 try:
1552 1552 node = self._cache[k]
1553 1553 assert node is not None # help pytype
1554 1554 return node.value
1555 1555 except KeyError:
1556 1556 if default is _notset:
1557 1557 raise
1558 1558 return default
1559 1559
1560 1560 def clear(self):
1561 1561 n = self._head
1562 1562 while n.key is not _notset:
1563 1563 self.totalcost -= n.cost
1564 1564 n.markempty()
1565 1565 n = n.next
1566 1566
1567 1567 self._cache.clear()
1568 1568
1569 1569 def copy(self, capacity=None, maxcost=0):
1570 1570 """Create a new cache as a copy of the current one.
1571 1571
1572 1572 By default, the new cache has the same capacity as the existing one.
1573 1573 But, the cache capacity can be changed as part of performing the
1574 1574 copy.
1575 1575
1576 1576 Items in the copy have an insertion/access order matching this
1577 1577 instance.
1578 1578 """
1579 1579
1580 1580 capacity = capacity or self.capacity
1581 1581 maxcost = maxcost or self.maxcost
1582 1582 result = lrucachedict(capacity, maxcost=maxcost)
1583 1583
1584 1584 # We copy entries by iterating in oldest-to-newest order so the copy
1585 1585 # has the correct ordering.
1586 1586
1587 1587 # Find the first non-empty entry.
1588 1588 n = self._head.prev
1589 1589 while n.key is _notset and n is not self._head:
1590 1590 n = n.prev
1591 1591
1592 1592 # We could potentially skip the first N items when decreasing capacity.
1593 1593 # But let's keep it simple unless it is a performance problem.
1594 1594 for i in range(len(self._cache)):
1595 1595 result.insert(n.key, n.value, cost=n.cost)
1596 1596 n = n.prev
1597 1597
1598 1598 return result
1599 1599
1600 1600 def popoldest(self):
1601 1601 """Remove the oldest item from the cache.
1602 1602
1603 1603 Returns the (key, value) describing the removed cache entry.
1604 1604 """
1605 1605 if not self._cache:
1606 1606 return
1607 1607
1608 1608 # Walk the linked list backwards starting at tail node until we hit
1609 1609 # a non-empty node.
1610 1610 n = self._head.prev
1611 1611
1612 1612 assert n is not None # help pytype
1613 1613
1614 1614 while n.key is _notset:
1615 1615 n = n.prev
1616 1616
1617 1617 assert n is not None # help pytype
1618 1618
1619 1619 key, value = n.key, n.value
1620 1620
1621 1621 # And remove it from the cache and mark it as empty.
1622 1622 del self._cache[n.key]
1623 1623 self.totalcost -= n.cost
1624 1624 n.markempty()
1625 1625
1626 1626 return key, value
1627 1627
1628 1628 def _movetohead(self, node):
1629 1629 """Mark a node as the newest, making it the new head.
1630 1630
1631 1631 When a node is accessed, it becomes the freshest entry in the LRU
1632 1632 list, which is denoted by self._head.
1633 1633
1634 1634 Visually, let's make ``N`` the new head node (* denotes head):
1635 1635
1636 1636 previous/oldest <-> head <-> next/next newest
1637 1637
1638 1638 ----<->--- A* ---<->-----
1639 1639 | |
1640 1640 E <-> D <-> N <-> C <-> B
1641 1641
1642 1642 To:
1643 1643
1644 1644 ----<->--- N* ---<->-----
1645 1645 | |
1646 1646 E <-> D <-> C <-> B <-> A
1647 1647
1648 1648 This requires the following moves:
1649 1649
1650 1650 C.next = D (node.prev.next = node.next)
1651 1651 D.prev = C (node.next.prev = node.prev)
1652 1652 E.next = N (head.prev.next = node)
1653 1653 N.prev = E (node.prev = head.prev)
1654 1654 N.next = A (node.next = head)
1655 1655 A.prev = N (head.prev = node)
1656 1656 """
1657 1657 head = self._head
1658 1658 # C.next = D
1659 1659 node.prev.next = node.next
1660 1660 # D.prev = C
1661 1661 node.next.prev = node.prev
1662 1662 # N.prev = E
1663 1663 node.prev = head.prev
1664 1664 # N.next = A
1665 1665 # It is tempting to do just "head" here, however if node is
1666 1666 # adjacent to head, this will do bad things.
1667 1667 node.next = head.prev.next
1668 1668 # E.next = N
1669 1669 node.next.prev = node
1670 1670 # A.prev = N
1671 1671 node.prev.next = node
1672 1672
1673 1673 self._head = node
1674 1674
1675 1675 def _addcapacity(self):
1676 1676 """Add a node to the circular linked list.
1677 1677
1678 1678 The new node is inserted before the head node.
1679 1679 """
1680 1680 head = self._head
1681 1681 node = _lrucachenode()
1682 1682 head.prev.next = node
1683 1683 node.prev = head.prev
1684 1684 node.next = head
1685 1685 head.prev = node
1686 1686 self._size += 1
1687 1687 return node
1688 1688
1689 1689 def _enforcecostlimit(self):
1690 1690 # This should run after an insertion. It should only be called if total
1691 1691 # cost limits are being enforced.
1692 1692 # The most recently inserted node is never evicted.
1693 1693 if len(self) <= 1 or self.totalcost <= self.maxcost:
1694 1694 return
1695 1695
1696 1696 # This is logically equivalent to calling popoldest() until we
1697 1697 # free up enough cost. We don't do that since popoldest() needs
1698 1698 # to walk the linked list and doing this in a loop would be
1699 1699 # quadratic. So we find the first non-empty node and then
1700 1700 # walk nodes until we free up enough capacity.
1701 1701 #
1702 1702 # If we only removed the minimum number of nodes to free enough
1703 1703 # cost at insert time, chances are high that the next insert would
1704 1704 # also require pruning. This would effectively constitute quadratic
1705 1705 # behavior for insert-heavy workloads. To mitigate this, we set a
1706 1706 # target cost that is a percentage of the max cost. This will tend
1707 1707 # to free more nodes when the high water mark is reached, which
1708 1708 # lowers the chances of needing to prune on the subsequent insert.
1709 1709 targetcost = int(self.maxcost * 0.75)
1710 1710
1711 1711 n = self._head.prev
1712 1712 while n.key is _notset:
1713 1713 n = n.prev
1714 1714
1715 1715 while len(self) > 1 and self.totalcost > targetcost:
1716 1716 del self._cache[n.key]
1717 1717 self.totalcost -= n.cost
1718 1718 n.markempty()
1719 1719 n = n.prev
1720 1720
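# A short sketch of cost-based eviction (hypothetical byte sizes as costs):
# an insert that pushes totalcost past maxcost evicts old nodes until the
# total drops to roughly 75% of the limit.
def _lrucachedict_example():
    d = lrucachedict(4, maxcost=100)
    d.insert(b'a', b'x' * 60, cost=60)
    d.insert(b'b', b'y' * 60, cost=60)  # total 120 > 100: b'a' is evicted
    return b'a' in d, d.totalcost  # (False, 60)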
1721 1721
1722 1722 def lrucachefunc(func):
1723 1723 '''cache most recent results of function calls'''
1724 1724 cache = {}
1725 1725 order = collections.deque()
1726 1726 if func.__code__.co_argcount == 1:
1727 1727
1728 1728 def f(arg):
1729 1729 if arg not in cache:
1730 1730 if len(cache) > 20:
1731 1731 del cache[order.popleft()]
1732 1732 cache[arg] = func(arg)
1733 1733 else:
1734 1734 order.remove(arg)
1735 1735 order.append(arg)
1736 1736 return cache[arg]
1737 1737
1738 1738 else:
1739 1739
1740 1740 def f(*args):
1741 1741 if args not in cache:
1742 1742 if len(cache) > 20:
1743 1743 del cache[order.popleft()]
1744 1744 cache[args] = func(*args)
1745 1745 else:
1746 1746 order.remove(args)
1747 1747 order.append(args)
1748 1748 return cache[args]
1749 1749
1750 1750 return f
1751 1751
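# lrucachefunc in one line of use (hypothetical function): like cachefunc,
# but only the ~20 most recently used distinct arguments stay cached.
def _lrucachefunc_example():
    doubled = lrucachefunc(lambda x: x * 2)
    return doubled(21)  # 42; repeated calls hit the cache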
1752 1752
1753 1753 class propertycache:
1754 1754 def __init__(self, func):
1755 1755 self.func = func
1756 1756 self.name = func.__name__
1757 1757
1758 1758 def __get__(self, obj, type=None):
1759 1759 result = self.func(obj)
1760 1760 self.cachevalue(obj, result)
1761 1761 return result
1762 1762
1763 1763 def cachevalue(self, obj, value):
1764 1764 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1765 1765 obj.__dict__[self.name] = value
1766 1766
1767 1767
1768 1768 def clearcachedproperty(obj, prop):
1769 1769 '''clear a cached property value, if one has been set'''
1770 1770 prop = pycompat.sysstr(prop)
1771 1771 if prop in obj.__dict__:
1772 1772 del obj.__dict__[prop]
1773 1773
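# A sketch of propertycache and clearcachedproperty (hypothetical class):
# the first access runs the function, then the value lives in __dict__ and
# bypasses the descriptor until cleared.
class _cachedexample:
    @propertycache
    def answer(self):
        return 42


def _propertycache_example():
    obj = _cachedexample()
    first = obj.answer  # computes once, stores in obj.__dict__
    clearcachedproperty(obj, b'answer')  # next access recomputes
    return first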
1774 1774
1775 1775 def increasingchunks(source, min=1024, max=65536):
1776 1776 """return no less than min bytes per chunk while data remains,
1777 1777 doubling min after each chunk until it reaches max"""
1778 1778
1779 1779 def log2(x):
1780 1780 if not x:
1781 1781 return 0
1782 1782 i = 0
1783 1783 while x:
1784 1784 x >>= 1
1785 1785 i += 1
1786 1786 return i - 1
1787 1787
1788 1788 buf = []
1789 1789 blen = 0
1790 1790 for chunk in source:
1791 1791 buf.append(chunk)
1792 1792 blen += len(chunk)
1793 1793 if blen >= min:
1794 1794 if min < max:
1795 1795 min = min << 1
1796 1796 nmin = 1 << log2(blen)
1797 1797 if nmin > min:
1798 1798 min = nmin
1799 1799 if min > max:
1800 1800 min = max
1801 1801 yield b''.join(buf)
1802 1802 blen = 0
1803 1803 buf = []
1804 1804 if buf:
1805 1805 yield b''.join(buf)
1806 1806
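# A quick sketch of increasingchunks (hypothetical source of 100 chunks of
# 100 bytes): many small chunks are coalesced into progressively larger
# ones, doubling until max, with the tail flushed as-is.
def _increasingchunks_example():
    source = (b'x' * 100 for _ in range(100))
    sizes = [len(c) for c in increasingchunks(source, min=1024, max=4096)]
    return sizes  # [1100, 2100, 4100, 2700]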
1807 1807
1808 1808 def always(fn):
1809 1809 return True
1810 1810
1811 1811
1812 1812 def never(fn):
1813 1813 return False
1814 1814
1815 1815
1816 1816 def nogc(func):
1817 1817 """disable garbage collector
1818 1818
1819 1819 Python's garbage collector triggers a GC each time a certain number of
1820 1820 container objects (the number being defined by gc.get_threshold()) are
1821 1821 allocated even when marked not to be tracked by the collector. Tracking has
1822 1822 no effect on when GCs are triggered, only on what objects the GC looks
1823 1823 into. As a workaround, disable GC while building complex (huge)
1824 1824 containers.
1825 1825
1826 1826 This garbage collector issue has been fixed in 2.7, but it still affects
1827 1827 CPython's performance.
1828 1828 """
1829 1829
1830 1830 def wrapper(*args, **kwargs):
1831 1831 gcenabled = gc.isenabled()
1832 1832 gc.disable()
1833 1833 try:
1834 1834 return func(*args, **kwargs)
1835 1835 finally:
1836 1836 if gcenabled:
1837 1837 gc.enable()
1838 1838
1839 1839 return wrapper
1840 1840
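# Usage sketch (editor's illustration; the function below is hypothetical):
#
#   @nogc
#   def _buildhugemap(entries):
#       return {e[0]: e[1] for e in entries}  # GC stays off while building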
1841 1841
1842 1842 if pycompat.ispypy:
1843 1843 # PyPy runs slower with gc disabled
1844 1844 nogc = lambda x: x
1845 1845
1846 1846
1847 1847 def pathto(root, n1, n2):
1848 1848 # type: (bytes, bytes, bytes) -> bytes
1849 1849 """return the relative path from one place to another.
1850 1850 root should use os.sep to separate directories
1851 1851 n1 should use os.sep to separate directories
1852 1852 n2 should use "/" to separate directories
1853 1853 returns an os.sep-separated path.
1854 1854
1855 1855 If n1 is a relative path, it's assumed it's
1856 1856 relative to root.
1857 1857 n2 should always be relative to root.
1858 1858 """
1859 1859 if not n1:
1860 1860 return localpath(n2)
1861 1861 if os.path.isabs(n1):
1862 1862 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1863 1863 return os.path.join(root, localpath(n2))
1864 1864 n2 = b'/'.join((pconvert(root), n2))
1865 1865 a, b = splitpath(n1), n2.split(b'/')
1866 1866 a.reverse()
1867 1867 b.reverse()
1868 1868 while a and b and a[-1] == b[-1]:
1869 1869 a.pop()
1870 1870 b.pop()
1871 1871 b.reverse()
1872 1872 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1873 1873
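# Worked example (editor's illustration, POSIX separators):
#
#   pathto(b'/repo', b'/repo/a/b', b'a/c/x.txt')  ->  b'../c/x.txt'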
1874 1874
1875 1875 def checksignature(func, depth=1):
1876 1876 '''wrap a function with code to check for calling errors'''
1877 1877
1878 1878 def check(*args, **kwargs):
1879 1879 try:
1880 1880 return func(*args, **kwargs)
1881 1881 except TypeError:
1882 1882 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1883 1883 raise error.SignatureError
1884 1884 raise
1885 1885
1886 1886 return check
1887 1887
1888 1888
1889 1889 # a whitelist of known filesystems where hardlinks work reliably
1890 1890 _hardlinkfswhitelist = {
1891 1891 b'apfs',
1892 1892 b'btrfs',
1893 1893 b'ext2',
1894 1894 b'ext3',
1895 1895 b'ext4',
1896 1896 b'hfs',
1897 1897 b'jfs',
1898 1898 b'NTFS',
1899 1899 b'reiserfs',
1900 1900 b'tmpfs',
1901 1901 b'ufs',
1902 1902 b'xfs',
1903 1903 b'zfs',
1904 1904 }
1905 1905
1906 1906
1907 1907 def copyfile(
1908 1908 src,
1909 1909 dest,
1910 1910 hardlink=False,
1911 1911 copystat=False,
1912 1912 checkambig=False,
1913 1913 nb_bytes=None,
1914 1914 no_hardlink_cb=None,
1915 1915 check_fs_hardlink=True,
1916 1916 ):
1917 1917 """copy a file, preserving mode and optionally other stat info like
1918 1918 atime/mtime
1919 1919
1920 1920 checkambig argument is used with filestat, and is useful only if
1921 1921 destination file is guarded by any lock (e.g. repo.lock or
1922 1922 repo.wlock).
1923 1923
1924 1924 copystat and checkambig should be exclusive.
1925 1925
1926 1926 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1927 1927 """
1928 1928 assert not (copystat and checkambig)
1929 1929 oldstat = None
1930 1930 if os.path.lexists(dest):
1931 1931 if checkambig:
1932 1932 oldstat = filestat.frompath(dest)
1933 1933 unlink(dest)
1934 1934 if hardlink and check_fs_hardlink:
1935 1935 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1936 1936 # unless we are confident that dest is on a whitelisted filesystem.
1937 1937 try:
1938 1938 fstype = getfstype(os.path.dirname(dest))
1939 1939 except OSError:
1940 1940 fstype = None
1941 1941 if fstype not in _hardlinkfswhitelist:
1942 1942 if no_hardlink_cb is not None:
1943 1943 no_hardlink_cb()
1944 1944 hardlink = False
1945 1945 if hardlink:
1946 1946 try:
1947 1947 oslink(src, dest)
1948 1948 if nb_bytes is not None:
1949 1949 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1950 1950 raise error.ProgrammingError(m)
1951 1951 return
1952 1952 except (IOError, OSError) as exc:
1953 1953 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1954 1954 no_hardlink_cb()
1955 1955 # fall back to normal copy
1956 1956 if os.path.islink(src):
1957 1957 os.symlink(os.readlink(src), dest)
1958 1958 # copytime is ignored for symlinks, but in general copytime isn't needed
1959 1959 # for them anyway
1960 1960 if nb_bytes is not None:
1961 1961 m = "cannot use `nb_bytes` on a symlink"
1962 1962 raise error.ProgrammingError(m)
1963 1963 else:
1964 1964 try:
1965 1965 shutil.copyfile(src, dest)
1966 1966 if copystat:
1967 1967 # copystat also copies mode
1968 1968 shutil.copystat(src, dest)
1969 1969 else:
1970 1970 shutil.copymode(src, dest)
1971 1971 if oldstat and oldstat.stat:
1972 1972 newstat = filestat.frompath(dest)
1973 1973 if newstat.isambig(oldstat):
1974 1974 # stat of copied file is ambiguous to original one
1975 1975 advanced = (
1976 1976 oldstat.stat[stat.ST_MTIME] + 1
1977 1977 ) & 0x7FFFFFFF
1978 1978 os.utime(dest, (advanced, advanced))
1979 1979 # We could do something smarter using `copy_file_range` call or similar
1980 1980 if nb_bytes is not None:
1981 1981 with open(dest, mode='r+') as f:
1982 1982 f.truncate(nb_bytes)
1983 1983 except shutil.Error as inst:
1984 1984 raise error.Abort(stringutil.forcebytestr(inst))
1985 1985
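# Usage sketch (editor's illustration; the paths are hypothetical):
#
#   copyfile(b'a/f', b'b/f', hardlink=True)   # silently falls back to a
#                                             # copy off whitelisted fs
#   copyfile(b'a/f', b'b/f', nb_bytes=1024)   # keep only the first 1 KiB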
1986 1986
1987 1987 def copyfiles(src, dst, hardlink=None, progress=None):
1988 1988 """Copy a directory tree using hardlinks if possible."""
1989 1989 num = 0
1990 1990
1991 1991 def settopic():
1992 1992 if progress:
1993 1993 progress.topic = _(b'linking') if hardlink else _(b'copying')
1994 1994
1995 1995 if os.path.isdir(src):
1996 1996 if hardlink is None:
1997 1997 hardlink = (
1998 1998 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1999 1999 )
2000 2000 settopic()
2001 2001 os.mkdir(dst)
2002 2002 for name, kind in listdir(src):
2003 2003 srcname = os.path.join(src, name)
2004 2004 dstname = os.path.join(dst, name)
2005 2005 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2006 2006 num += n
2007 2007 else:
2008 2008 if hardlink is None:
2009 2009 hardlink = (
2010 2010 os.stat(os.path.dirname(src)).st_dev
2011 2011 == os.stat(os.path.dirname(dst)).st_dev
2012 2012 )
2013 2013 settopic()
2014 2014
2015 2015 if hardlink:
2016 2016 try:
2017 2017 oslink(src, dst)
2018 2018 except (IOError, OSError) as exc:
2019 2019 if exc.errno != errno.EEXIST:
2020 2020 hardlink = False
2021 2021 # XXX maybe try to relink if the file exists?
2022 2022 shutil.copy(src, dst)
2023 2023 else:
2024 2024 shutil.copy(src, dst)
2025 2025 num += 1
2026 2026 if progress:
2027 2027 progress.increment()
2028 2028
2029 2029 return hardlink, num
2030 2030
2031 2031
2032 2032 _winreservednames = {
2033 2033 b'con',
2034 2034 b'prn',
2035 2035 b'aux',
2036 2036 b'nul',
2037 2037 b'com1',
2038 2038 b'com2',
2039 2039 b'com3',
2040 2040 b'com4',
2041 2041 b'com5',
2042 2042 b'com6',
2043 2043 b'com7',
2044 2044 b'com8',
2045 2045 b'com9',
2046 2046 b'lpt1',
2047 2047 b'lpt2',
2048 2048 b'lpt3',
2049 2049 b'lpt4',
2050 2050 b'lpt5',
2051 2051 b'lpt6',
2052 2052 b'lpt7',
2053 2053 b'lpt8',
2054 2054 b'lpt9',
2055 2055 }
2056 2056 _winreservedchars = b':*?"<>|'
2057 2057
2058 2058
2059 2059 def checkwinfilename(path):
2060 2060 # type: (bytes) -> Optional[bytes]
2061 2061 r"""Check that the base-relative path is a valid filename on Windows.
2062 2062 Returns None if the path is ok, or a UI string describing the problem.
2063 2063
2064 2064 >>> checkwinfilename(b"just/a/normal/path")
2065 2065 >>> checkwinfilename(b"foo/bar/con.xml")
2066 2066 "filename contains 'con', which is reserved on Windows"
2067 2067 >>> checkwinfilename(b"foo/con.xml/bar")
2068 2068 "filename contains 'con', which is reserved on Windows"
2069 2069 >>> checkwinfilename(b"foo/bar/xml.con")
2070 2070 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2071 2071 "filename contains 'AUX', which is reserved on Windows"
2072 2072 >>> checkwinfilename(b"foo/bar/bla:.txt")
2073 2073 "filename contains ':', which is reserved on Windows"
2074 2074 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2075 2075 "filename contains '\\x07', which is invalid on Windows"
2076 2076 >>> checkwinfilename(b"foo/bar/bla ")
2077 2077 "filename ends with ' ', which is not allowed on Windows"
2078 2078 >>> checkwinfilename(b"../bar")
2079 2079 >>> checkwinfilename(b"foo\\")
2080 2080 "filename ends with '\\', which is invalid on Windows"
2081 2081 >>> checkwinfilename(b"foo\\/bar")
2082 2082 "directory name ends with '\\', which is invalid on Windows"
2083 2083 """
2084 2084 if path.endswith(b'\\'):
2085 2085 return _(b"filename ends with '\\', which is invalid on Windows")
2086 2086 if b'\\/' in path:
2087 2087 return _(b"directory name ends with '\\', which is invalid on Windows")
2088 2088 for n in path.replace(b'\\', b'/').split(b'/'):
2089 2089 if not n:
2090 2090 continue
2091 2091 for c in _filenamebytestr(n):
2092 2092 if c in _winreservedchars:
2093 2093 return (
2094 2094 _(
2095 2095 b"filename contains '%s', which is reserved "
2096 2096 b"on Windows"
2097 2097 )
2098 2098 % c
2099 2099 )
2100 2100 if ord(c) <= 31:
2101 2101 return _(
2102 2102 b"filename contains '%s', which is invalid on Windows"
2103 2103 ) % stringutil.escapestr(c)
2104 2104 base = n.split(b'.')[0]
2105 2105 if base and base.lower() in _winreservednames:
2106 2106 return (
2107 2107 _(b"filename contains '%s', which is reserved on Windows")
2108 2108 % base
2109 2109 )
2110 2110 t = n[-1:]
2111 2111 if t in b'. ' and n not in b'..':
2112 2112 return (
2113 2113 _(
2114 2114 b"filename ends with '%s', which is not allowed "
2115 2115 b"on Windows"
2116 2116 )
2117 2117 % t
2118 2118 )
2119 2119
2120 2120
2121 2121 timer = getattr(time, "perf_counter", None)
2122 2122
2123 2123 if pycompat.iswindows:
2124 2124 checkosfilename = checkwinfilename
2125 2125 if not timer:
2126 2126 timer = time.clock
2127 2127 else:
2128 2128 # mercurial.windows doesn't have platform.checkosfilename
2129 2129 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2130 2130 if not timer:
2131 2131 timer = time.time
2132 2132
2133 2133
2134 2134 def makelock(info, pathname):
2135 2135 """Create a lock file atomically if possible
2136 2136
2137 2137 This may leave a stale lock file if symlink isn't supported and signal
2138 2138 interrupt is enabled.
2139 2139 """
2140 2140 try:
2141 2141 return os.symlink(info, pathname)
2142 2142 except OSError as why:
2143 2143 if why.errno == errno.EEXIST:
2144 2144 raise
2145 2145 except AttributeError: # no symlink in os
2146 2146 pass
2147 2147
2148 2148 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2149 2149 ld = os.open(pathname, flags)
2150 2150 os.write(ld, info)
2151 2151 os.close(ld)
2152 2152
2153 2153
2154 2154 def readlock(pathname):
2155 2155 # type: (bytes) -> bytes
2156 2156 try:
2157 2157 return readlink(pathname)
2158 2158 except OSError as why:
2159 2159 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2160 2160 raise
2161 2161 except AttributeError: # no symlink in os
2162 2162 pass
2163 2163 with posixfile(pathname, b'rb') as fp:
2164 2164 return fp.read()
2165 2165
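# Pairing sketch (editor's illustration; the payload is hypothetical):
# makelock() prefers a symlink whose *target* stores the lock payload;
# readlock() reads it back, falling back to a regular file on platforms
# without symlink support.
#
#   makelock(b'host:12345', lockpath)
#   readlock(lockpath)  ->  b'host:12345'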
2166 2166
2167 2167 def fstat(fp):
2168 2168 '''stat file object that may not have fileno method.'''
2169 2169 try:
2170 2170 return os.fstat(fp.fileno())
2171 2171 except AttributeError:
2172 2172 return os.stat(fp.name)
2173 2173
2174 2174
2175 2175 # File system features
2176 2176
2177 2177
2178 2178 def fscasesensitive(path):
2179 2179 # type: (bytes) -> bool
2180 2180 """
2181 2181 Return true if the given path is on a case-sensitive filesystem
2182 2182
2183 2183 Requires a path (like /foo/.hg) ending with a foldable final
2184 2184 directory component.
2185 2185 """
2186 2186 s1 = os.lstat(path)
2187 2187 d, b = os.path.split(path)
2188 2188 b2 = b.upper()
2189 2189 if b == b2:
2190 2190 b2 = b.lower()
2191 2191 if b == b2:
2192 2192 return True # no evidence against case sensitivity
2193 2193 p2 = os.path.join(d, b2)
2194 2194 try:
2195 2195 s2 = os.lstat(p2)
2196 2196 if s2 == s1:
2197 2197 return False
2198 2198 return True
2199 2199 except OSError:
2200 2200 return True
2201 2201
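# Behavior sketch (editor's illustration): for b'/repo/.hg' the function
# lstats the case-swapped sibling b'/repo/.HG'; an identical stat result
# means both names alias one file, i.e. the filesystem is case-insensitive
# and False is returned. Any lstat error counts as case-sensitive.
#
#   sensitive = fscasesensitive(b'/repo/.hg')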
2202 2202
2203 2203 _re2_input = lambda x: x
2204 2204 try:
2205 2205 import re2 # pytype: disable=import-error
2206 2206
2207 2207 _re2 = None
2208 2208 except ImportError:
2209 2209 _re2 = False
2210 2210
2211 2211
2212 2212 class _re:
2213 2213 def _checkre2(self):
2214 2214 global _re2
2215 2215 global _re2_input
2216 2216
2217 2217 check_pattern = br'\[([^\[]+)\]'
2218 2218 check_input = b'[ui]'
2219 2219 try:
2220 2220 # check if match works, see issue3964
2221 2221 _re2 = bool(re2.match(check_pattern, check_input))
2222 2222 except ImportError:
2223 2223 _re2 = False
2224 2224 except TypeError:
2225 2225 # the `pyre-2` project provides a re2 module that accepts bytes
2226 2226 # the `fb-re2` project provides a re2 module that accepts sysstr
2227 2227 check_pattern = pycompat.sysstr(check_pattern)
2228 2228 check_input = pycompat.sysstr(check_input)
2229 2229 _re2 = bool(re2.match(check_pattern, check_input))
2230 2230 _re2_input = pycompat.sysstr
2231 2231
2232 2232 def compile(self, pat, flags=0):
2233 2233 """Compile a regular expression, using re2 if possible
2234 2234
2235 2235 For best performance, use only re2-compatible regexp features. The
2236 2236 only flags from the re module that are re2-compatible are
2237 2237 IGNORECASE and MULTILINE."""
2238 2238 if _re2 is None:
2239 2239 self._checkre2()
2240 2240 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2241 2241 if flags & remod.IGNORECASE:
2242 2242 pat = b'(?i)' + pat
2243 2243 if flags & remod.MULTILINE:
2244 2244 pat = b'(?m)' + pat
2245 2245 try:
2246 2246 return re2.compile(_re2_input(pat))
2247 2247 except re2.error:
2248 2248 pass
2249 2249 return remod.compile(pat, flags)
2250 2250
2251 2251 @propertycache
2252 2252 def escape(self):
2253 2253 """Return the version of escape corresponding to self.compile.
2254 2254
2255 2255 This is imperfect because whether re2 or re is used for a particular
2256 2256 function depends on the flags, etc, but it's the best we can do.
2257 2257 """
2258 2258 global _re2
2259 2259 if _re2 is None:
2260 2260 self._checkre2()
2261 2261 if _re2:
2262 2262 return re2.escape
2263 2263 else:
2264 2264 return remod.escape
2265 2265
2266 2266
2267 2267 re = _re()
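
# Usage sketch (editor's illustration): util.re.compile transparently
# prefers re2 when the extension is importable and only re2-compatible
# flags are used, falling back to the stdlib re module otherwise.
#
#   pat = re.compile(br'^[0-9a-f]{12,40}$', remod.IGNORECASE)
#   bool(pat.match(b'1D1B244ADEAD'))  ->  True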
2268 2268
2269 2269 _fspathcache = {}
2270 2270
2271 2271
2272 2272 def fspath(name, root):
2273 2273 # type: (bytes, bytes) -> bytes
2274 2274 """Get name in the case stored in the filesystem
2275 2275
2276 2276 The name should be relative to root, and be normcase-ed for efficiency.
2277 2277
2278 2278 Note that this function is unnecessary, and should not be
2279 2279 called, for case-sensitive filesystems (simply because it's expensive).
2280 2280
2281 2281 The root should be normcase-ed, too.
2282 2282 """
2283 2283
2284 2284 def _makefspathcacheentry(dir):
2285 2285 return {normcase(n): n for n in os.listdir(dir)}
2286 2286
2287 2287 seps = pycompat.ossep
2288 2288 if pycompat.osaltsep:
2289 2289 seps = seps + pycompat.osaltsep
2290 2290 # Protect backslashes. This gets silly very quickly.
2291 2291 seps = seps.replace(b'\\', b'\\\\')
2292 2292 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2293 2293 dir = os.path.normpath(root)
2294 2294 result = []
2295 2295 for part, sep in pattern.findall(name):
2296 2296 if sep:
2297 2297 result.append(sep)
2298 2298 continue
2299 2299
2300 2300 if dir not in _fspathcache:
2301 2301 _fspathcache[dir] = _makefspathcacheentry(dir)
2302 2302 contents = _fspathcache[dir]
2303 2303
2304 2304 found = contents.get(part)
2305 2305 if not found:
2306 2306 # retry "once per directory" per "dirstate.walk", which
2307 2307 # may take place for each patch of "hg qpush", for example
2308 2308 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2309 2309 found = contents.get(part)
2310 2310
2311 2311 result.append(found or part)
2312 2312 dir = os.path.join(dir, part)
2313 2313
2314 2314 return b''.join(result)
2315 2315
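# Usage sketch (editor's illustration; the names are hypothetical): on a
# case-insensitive filesystem storing b'Foo/README.txt',
#
#   fspath(b'foo/readme.txt', root)  ->  b'Foo/README.txt'
#
# Both arguments must already be normcase-ed; directory listings are
# cached in _fspathcache and refreshed once on a miss.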
2316 2316
2317 2317 def checknlink(testfile):
2318 2318 # type: (bytes) -> bool
2319 2319 '''check whether hardlink count reporting works properly'''
2320 2320
2321 2321 # testfile may be open, so we need a separate file for checking to
2322 2322 # work around issue2543 (or testfile may get lost on Samba shares)
2323 2323 f1, f2, fp = None, None, None
2324 2324 try:
2325 2325 fd, f1 = pycompat.mkstemp(
2326 2326 prefix=b'.%s-' % os.path.basename(testfile),
2327 2327 suffix=b'1~',
2328 2328 dir=os.path.dirname(testfile),
2329 2329 )
2330 2330 os.close(fd)
2331 2331 f2 = b'%s2~' % f1[:-2]
2332 2332
2333 2333 oslink(f1, f2)
2334 2334 # nlinks() may behave differently for files on Windows shares if
2335 2335 # the file is open.
2336 2336 fp = posixfile(f2)
2337 2337 return nlinks(f2) > 1
2338 2338 except OSError:
2339 2339 return False
2340 2340 finally:
2341 2341 if fp is not None:
2342 2342 fp.close()
2343 2343 for f in (f1, f2):
2344 2344 try:
2345 2345 if f is not None:
2346 2346 os.unlink(f)
2347 2347 except OSError:
2348 2348 pass
2349 2349
2350 2350
2351 2351 def endswithsep(path):
2352 2352 # type: (bytes) -> bool
2353 2353 '''Check path ends with os.sep or os.altsep.'''
2354 2354 return bool( # help pytype
2355 2355 path.endswith(pycompat.ossep)
2356 2356 or pycompat.osaltsep
2357 2357 and path.endswith(pycompat.osaltsep)
2358 2358 )
2359 2359
2360 2360
2361 2361 def splitpath(path):
2362 2362 # type: (bytes) -> List[bytes]
2363 2363 """Split path by os.sep.
2364 2364 Note that this function does not use os.altsep because it is
2365 2365 an alternative to a simple "xxx.split(os.sep)".
2366 2366 It is recommended to use os.path.normpath() before using this
2367 2367 function if needed."""
2368 2368 return path.split(pycompat.ossep)
2369 2369
2370 2370
2371 2371 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2372 2372 """Create a temporary file with the same contents from name
2373 2373
2374 2374 The permission bits are copied from the original file.
2375 2375
2376 2376 If the temporary file is going to be truncated immediately, you
2377 2377 can use emptyok=True as an optimization.
2378 2378
2379 2379 Returns the name of the temporary file.
2380 2380 """
2381 2381 d, fn = os.path.split(name)
2382 2382 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2383 2383 os.close(fd)
2384 2384 # Temporary files are created with mode 0600, which is usually not
2385 2385 # what we want. If the original file already exists, just copy
2386 2386 # its mode. Otherwise, manually obey umask.
2387 2387 copymode(name, temp, createmode, enforcewritable)
2388 2388
2389 2389 if emptyok:
2390 2390 return temp
2391 2391 try:
2392 2392 try:
2393 2393 ifp = posixfile(name, b"rb")
2394 2394 except IOError as inst:
2395 2395 if inst.errno == errno.ENOENT:
2396 2396 return temp
2397 2397 if not getattr(inst, 'filename', None):
2398 2398 inst.filename = name
2399 2399 raise
2400 2400 ofp = posixfile(temp, b"wb")
2401 2401 for chunk in filechunkiter(ifp):
2402 2402 ofp.write(chunk)
2403 2403 ifp.close()
2404 2404 ofp.close()
2405 2405 except: # re-raises
2406 2406 try:
2407 2407 os.unlink(temp)
2408 2408 except OSError:
2409 2409 pass
2410 2410 raise
2411 2411 return temp
2412 2412
2413 2413
2414 2414 class filestat:
2415 2415 """help to exactly detect change of a file
2416 2416
2417 2417 The 'stat' attribute is the result of 'os.stat()' if the specified
2418 2418 'path' exists, and None otherwise. This lets callers avoid a
2419 2419 preparatory 'exists()' check of their own.
2420 2420 """
2421 2421
2422 2422 def __init__(self, stat):
2423 2423 self.stat = stat
2424 2424
2425 2425 @classmethod
2426 2426 def frompath(cls, path):
2427 2427 try:
2428 2428 stat = os.stat(path)
2429 2429 except FileNotFoundError:
2430 2430 stat = None
2431 2431 return cls(stat)
2432 2432
2433 2433 @classmethod
2434 2434 def fromfp(cls, fp):
2435 2435 stat = os.fstat(fp.fileno())
2436 2436 return cls(stat)
2437 2437
2438 2438 __hash__ = object.__hash__
2439 2439
2440 2440 def __eq__(self, old):
2441 2441 try:
2442 2442 # if ambiguity between stat of new and old file is
2443 2443 # avoided, comparison of size, ctime and mtime is enough
2444 2444 # to exactly detect change of a file regardless of platform
2445 2445 return (
2446 2446 self.stat.st_size == old.stat.st_size
2447 2447 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2448 2448 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2449 2449 )
2450 2450 except AttributeError:
2451 2451 pass
2452 2452 try:
2453 2453 return self.stat is None and old.stat is None
2454 2454 except AttributeError:
2455 2455 return False
2456 2456
2457 2457 def isambig(self, old):
2458 2458 """Examine whether new (= self) stat is ambiguous against old one
2459 2459
2460 2460 "S[N]" below means stat of a file at N-th change:
2461 2461
2462 2462 - S[n-1].ctime < S[n].ctime: can detect change of a file
2463 2463 - S[n-1].ctime == S[n].ctime
2464 2464 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2465 2465 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2466 2466 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2467 2467 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2468 2468
2469 2469 Case (*2) above means that a file was changed twice or more within
2470 2470 the same second (= S[n-1].ctime), so comparing timestamps
2471 2471 is ambiguous.
2472 2472
2473 2473 The basic idea for avoiding such ambiguity is to "advance mtime
2474 2474 1 sec, if the timestamp is ambiguous".
2475 2475
2476 2476 But advancing mtime only in case (*2) doesn't work as
2477 2477 expected, because naturally advanced S[n].mtime in case (*1)
2478 2478 might be equal to manually advanced S[n-1 or earlier].mtime.
2479 2479
2480 2480 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2481 2481 treated as ambiguous regardless of mtime, to avoid overlooking
2482 2482 changes masked by collisions between such mtimes.
2483 2483
2484 2484 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2485 2485 S[n].mtime", even if size of a file isn't changed.
2486 2486 """
2487 2487 try:
2488 2488 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2489 2489 except AttributeError:
2490 2490 return False
2491 2491
2492 2492 def avoidambig(self, path, old):
2493 2493 """Change file stat of specified path to avoid ambiguity
2494 2494
2495 2495 'old' should be previous filestat of 'path'.
2496 2496
2497 2497 Ambiguity avoidance is skipped if the process doesn't have
2498 2498 appropriate privileges for 'path'; in that case this returns
2499 2499 False.
2500 2500
2501 2501 Otherwise, this returns True, as "ambiguity is avoided".
2502 2502 """
2503 2503 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2504 2504 try:
2505 2505 os.utime(path, (advanced, advanced))
2506 2506 except PermissionError:
2507 2507 # utime() on the file created by another user causes EPERM,
2508 2508 # if a process doesn't have appropriate privileges
2509 2509 return False
2510 2510 return True
2511 2511
2512 2512 def __ne__(self, other):
2513 2513 return not self == other
2514 2514
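# Usage sketch (editor's illustration; rewrite() is hypothetical): two
# writes within the same second can leave size/ctime/mtime identical,
# which __eq__ cannot distinguish. avoidambig() forces a visible change:
#
#   old = filestat.frompath(path)
#   rewrite(path)                    # same-second rewrite
#   new = filestat.frompath(path)
#   if new.isambig(old):
#       new.avoidambig(path, old)    # mtime := (old mtime + 1) & 0x7FFFFFFF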
2515 2515
2516 2516 class atomictempfile:
2517 2517 """writable file object that atomically updates a file
2518 2518
2519 2519 All writes will go to a temporary copy of the original file. Call
2520 2520 close() when you are done writing, and atomictempfile will rename
2521 2521 the temporary copy to the original name, making the changes
2522 2522 visible. If the object is destroyed without being closed, all your
2523 2523 writes are discarded.
2524 2524
2525 2525 checkambig argument of constructor is used with filestat, and is
2526 2526 useful only if target file is guarded by any lock (e.g. repo.lock
2527 2527 or repo.wlock).
2528 2528 """
2529 2529
2530 2530 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2531 2531 self.__name = name # permanent name
2532 2532 self._tempname = mktempcopy(
2533 2533 name,
2534 2534 emptyok=(b'w' in mode),
2535 2535 createmode=createmode,
2536 2536 enforcewritable=(b'w' in mode),
2537 2537 )
2538 2538
2539 2539 self._fp = posixfile(self._tempname, mode)
2540 2540 self._checkambig = checkambig
2541 2541
2542 2542 # delegated methods
2543 2543 self.read = self._fp.read
2544 2544 self.write = self._fp.write
2545 2545 self.writelines = self._fp.writelines
2546 2546 self.seek = self._fp.seek
2547 2547 self.tell = self._fp.tell
2548 2548 self.fileno = self._fp.fileno
2549 2549
2550 2550 def close(self):
2551 2551 if not self._fp.closed:
2552 2552 self._fp.close()
2553 2553 filename = localpath(self.__name)
2554 2554 oldstat = self._checkambig and filestat.frompath(filename)
2555 2555 if oldstat and oldstat.stat:
2556 2556 rename(self._tempname, filename)
2557 2557 newstat = filestat.frompath(filename)
2558 2558 if newstat.isambig(oldstat):
2559 2559 # stat of changed file is ambiguous to original one
2560 2560 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2561 2561 os.utime(filename, (advanced, advanced))
2562 2562 else:
2563 2563 rename(self._tempname, filename)
2564 2564
2565 2565 def discard(self):
2566 2566 if not self._fp.closed:
2567 2567 try:
2568 2568 os.unlink(self._tempname)
2569 2569 except OSError:
2570 2570 pass
2571 2571 self._fp.close()
2572 2572
2573 2573 def __del__(self):
2574 2574 if safehasattr(self, '_fp'): # constructor actually did something
2575 2575 self.discard()
2576 2576
2577 2577 def __enter__(self):
2578 2578 return self
2579 2579
2580 2580 def __exit__(self, exctype, excvalue, traceback):
2581 2581 if exctype is not None:
2582 2582 self.discard()
2583 2583 else:
2584 2584 self.close()
2585 2585
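# Usage sketch (editor's illustration; the path and data are hypothetical):
#
#   with atomictempfile(b'.hg/store/index', checkambig=True) as fp:
#       fp.write(data)
#   # a clean exit renames the temp copy over the target; an exception
#   # discards it, leaving the original untouched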
2586 2586
2587 2587 def tryrmdir(f):
2588 2588 try:
2589 2589 removedirs(f)
2590 2590 except OSError as e:
2591 2591 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2592 2592 raise
2593 2593
2594 2594
2595 2595 def unlinkpath(f, ignoremissing=False, rmdir=True):
2596 2596 # type: (bytes, bool, bool) -> None
2597 2597 """unlink and remove the directory if it is empty"""
2598 2598 if ignoremissing:
2599 2599 tryunlink(f)
2600 2600 else:
2601 2601 unlink(f)
2602 2602 if rmdir:
2603 2603 # try removing directories that might now be empty
2604 2604 try:
2605 2605 removedirs(os.path.dirname(f))
2606 2606 except OSError:
2607 2607 pass
2608 2608
2609 2609
2610 2610 def tryunlink(f):
2611 2611 # type: (bytes) -> None
2612 2612 """Attempt to remove a file, ignoring FileNotFoundError."""
2613 2613 try:
2614 2614 unlink(f)
2615 2615 except FileNotFoundError:
2616 2616 pass
2617 2617
2618 2618
2619 2619 def makedirs(name, mode=None, notindexed=False):
2620 2620 # type: (bytes, Optional[int], bool) -> None
2621 2621 """recursive directory creation with parent mode inheritance
2622 2622
2623 2623 Newly created directories are marked as "not to be indexed by
2624 2624 the content indexing service", if ``notindexed`` is specified
2625 2625 for "write" mode access.
2626 2626 """
2627 2627 try:
2628 2628 makedir(name, notindexed)
2629 2629 except OSError as err:
2630 2630 if err.errno == errno.EEXIST:
2631 2631 return
2632 2632 if err.errno != errno.ENOENT or not name:
2633 2633 raise
2634 2634 parent = os.path.dirname(abspath(name))
2635 2635 if parent == name:
2636 2636 raise
2637 2637 makedirs(parent, mode, notindexed)
2638 2638 try:
2639 2639 makedir(name, notindexed)
2640 2640 except OSError as err:
2641 2641 # Catch EEXIST to handle races
2642 2642 if err.errno == errno.EEXIST:
2643 2643 return
2644 2644 raise
2645 2645 if mode is not None:
2646 2646 os.chmod(name, mode)
2647 2647
2648 2648
2649 2649 def readfile(path):
2650 2650 # type: (bytes) -> bytes
2651 2651 with open(path, b'rb') as fp:
2652 2652 return fp.read()
2653 2653
2654 2654
2655 2655 def writefile(path, text):
2656 2656 # type: (bytes, bytes) -> None
2657 2657 with open(path, b'wb') as fp:
2658 2658 fp.write(text)
2659 2659
2660 2660
2661 2661 def appendfile(path, text):
2662 2662 # type: (bytes, bytes) -> None
2663 2663 with open(path, b'ab') as fp:
2664 2664 fp.write(text)
2665 2665
2666 2666
2667 2667 class chunkbuffer:
2668 2668 """Allow arbitrary sized chunks of data to be efficiently read from an
2669 2669 iterator over chunks of arbitrary size."""
2670 2670
2671 2671 def __init__(self, in_iter):
2672 2672 """in_iter is the iterator that's iterating over the input chunks."""
2673 2673
2674 2674 def splitbig(chunks):
2675 2675 for chunk in chunks:
2676 2676 if len(chunk) > 2 ** 20:
2677 2677 pos = 0
2678 2678 while pos < len(chunk):
2679 2679 end = pos + 2 ** 18
2680 2680 yield chunk[pos:end]
2681 2681 pos = end
2682 2682 else:
2683 2683 yield chunk
2684 2684
2685 2685 self.iter = splitbig(in_iter)
2686 2686 self._queue = collections.deque()
2687 2687 self._chunkoffset = 0
2688 2688
2689 2689 def read(self, l=None):
2690 2690 """Read L bytes of data from the iterator of chunks of data.
2691 2691 Returns less than L bytes if the iterator runs dry.
2692 2692
2693 2693 If the size parameter is omitted, read everything."""
2694 2694 if l is None:
2695 2695 return b''.join(self.iter)
2696 2696
2697 2697 left = l
2698 2698 buf = []
2699 2699 queue = self._queue
2700 2700 while left > 0:
2701 2701 # refill the queue
2702 2702 if not queue:
2703 2703 target = 2 ** 18
2704 2704 for chunk in self.iter:
2705 2705 queue.append(chunk)
2706 2706 target -= len(chunk)
2707 2707 if target <= 0:
2708 2708 break
2709 2709 if not queue:
2710 2710 break
2711 2711
2712 2712 # The easy way to do this would be to queue.popleft(), modify the
2713 2713 # chunk (if necessary), then queue.appendleft(). However, for cases
2714 2714 # where we read partial chunk content, this incurs 2 dequeue
2715 2715 # mutations and creates a new str for the remaining chunk in the
2716 2716 # queue. Our code below avoids this overhead.
2717 2717
2718 2718 chunk = queue[0]
2719 2719 chunkl = len(chunk)
2720 2720 offset = self._chunkoffset
2721 2721
2722 2722 # Use full chunk.
2723 2723 if offset == 0 and left >= chunkl:
2724 2724 left -= chunkl
2725 2725 queue.popleft()
2726 2726 buf.append(chunk)
2727 2727 # self._chunkoffset remains at 0.
2728 2728 continue
2729 2729
2730 2730 chunkremaining = chunkl - offset
2731 2731
2732 2732 # Use all of unconsumed part of chunk.
2733 2733 if left >= chunkremaining:
2734 2734 left -= chunkremaining
2735 2735 queue.popleft()
2736 2736 # offset == 0 is enabled by block above, so this won't merely
2737 2737 # copy via ``chunk[0:]``.
2738 2738 buf.append(chunk[offset:])
2739 2739 self._chunkoffset = 0
2740 2740
2741 2741 # Partial chunk needed.
2742 2742 else:
2743 2743 buf.append(chunk[offset : offset + left])
2744 2744 self._chunkoffset += left
2745 2745 left -= chunkremaining # goes negative here, ending the loop
2746 2746
2747 2747 return b''.join(buf)
2748 2748
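# Worked example (editor's illustration):
#
#   cb = chunkbuffer(iter([b'abc', b'defg']))
#   cb.read(5)  ->  b'abcde'   # spans chunk boundaries
#   cb.read(5)  ->  b'fg'      # short read once the iterator runs dry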
2749 2749
2750 2750 def filechunkiter(f, size=131072, limit=None):
2751 2751 """Create a generator that produces the data in the file size
2752 2752 (default 131072) bytes at a time, up to optional limit (default is
2753 2753 to read all data). Chunks may be less than size bytes if the
2754 2754 chunk is the last chunk in the file, or the file is a socket or
2755 2755 some other type of file that sometimes reads less data than is
2756 2756 requested."""
2757 2757 assert size >= 0
2758 2758 assert limit is None or limit >= 0
2759 2759 while True:
2760 2760 if limit is None:
2761 2761 nbytes = size
2762 2762 else:
2763 2763 nbytes = min(limit, size)
2764 2764 s = nbytes and f.read(nbytes)
2765 2765 if not s:
2766 2766 break
2767 2767 if limit:
2768 2768 limit -= len(s)
2769 2769 yield s
2770 2770
2771 2771
2772 2772 class cappedreader:
2773 2773 """A file object proxy that allows reading up to N bytes.
2774 2774
2775 2775 Given a source file object, instances of this type allow reading up to
2776 2776 N bytes from that source file object. Attempts to read past the allowed
2777 2777 limit are treated as EOF.
2778 2778
2779 2779 It is assumed that I/O is not performed on the original file object
2780 2780 in addition to I/O that is performed by this instance. If there is,
2781 2781 state tracking will get out of sync and unexpected results will ensue.
2782 2782 """
2783 2783
2784 2784 def __init__(self, fh, limit):
2785 2785 """Allow reading up to <limit> bytes from <fh>."""
2786 2786 self._fh = fh
2787 2787 self._left = limit
2788 2788
2789 2789 def read(self, n=-1):
2790 2790 if not self._left:
2791 2791 return b''
2792 2792
2793 2793 if n < 0:
2794 2794 n = self._left
2795 2795
2796 2796 data = self._fh.read(min(n, self._left))
2797 2797 self._left -= len(data)
2798 2798 assert self._left >= 0
2799 2799
2800 2800 return data
2801 2801
2802 2802 def readinto(self, b):
2803 2803 res = self.read(len(b))
2804 2804 if res is None:
2805 2805 return None
2806 2806
2807 2807 b[0 : len(res)] = res
2808 2808 return len(res)
2809 2809
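# Worked example (editor's illustration):
#
#   r = cappedreader(io.BytesIO(b'0123456789'), 4)
#   r.read()   ->  b'0123'   # capped at the 4-byte limit
#   r.read(1)  ->  b''       # past the limit reads as EOF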
2810 2810
2811 2811 def unitcountfn(*unittable):
2812 2812 '''return a function that renders a readable count of some quantity'''
2813 2813
2814 2814 def go(count):
2815 2815 for multiplier, divisor, format in unittable:
2816 2816 if abs(count) >= divisor * multiplier:
2817 2817 return format % (count / float(divisor))
2818 2818 return unittable[-1][2] % count
2819 2819
2820 2820 return go
2821 2821
2822 2822
2823 2823 def processlinerange(fromline, toline):
2824 2824 # type: (int, int) -> Tuple[int, int]
2825 2825 """Check that linerange <fromline>:<toline> makes sense and return a
2826 2826 0-based range.
2827 2827
2828 2828 >>> processlinerange(10, 20)
2829 2829 (9, 20)
2830 2830 >>> processlinerange(2, 1)
2831 2831 Traceback (most recent call last):
2832 2832 ...
2833 2833 ParseError: line range must be positive
2834 2834 >>> processlinerange(0, 5)
2835 2835 Traceback (most recent call last):
2836 2836 ...
2837 2837 ParseError: fromline must be strictly positive
2838 2838 """
2839 2839 if toline - fromline < 0:
2840 2840 raise error.ParseError(_(b"line range must be positive"))
2841 2841 if fromline < 1:
2842 2842 raise error.ParseError(_(b"fromline must be strictly positive"))
2843 2843 return fromline - 1, toline
2844 2844
2845 2845
2846 2846 bytecount = unitcountfn(
2847 2847 (100, 1 << 30, _(b'%.0f GB')),
2848 2848 (10, 1 << 30, _(b'%.1f GB')),
2849 2849 (1, 1 << 30, _(b'%.2f GB')),
2850 2850 (100, 1 << 20, _(b'%.0f MB')),
2851 2851 (10, 1 << 20, _(b'%.1f MB')),
2852 2852 (1, 1 << 20, _(b'%.2f MB')),
2853 2853 (100, 1 << 10, _(b'%.0f KB')),
2854 2854 (10, 1 << 10, _(b'%.1f KB')),
2855 2855 (1, 1 << 10, _(b'%.2f KB')),
2856 2856 (1, 1, _(b'%.0f bytes')),
2857 2857 )
2858 2858
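# Worked examples (editor's illustration): the first matching row formats
# the value, so displayed precision shrinks as magnitude grows.
#
#   bytecount(100)        ->  b'100 bytes'
#   bytecount(4096)       ->  b'4.00 KB'
#   bytecount(123456789)  ->  b'118 MB'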
2859 2859
2860 2860 class transformingwriter:
2861 2861 """Writable file wrapper to transform data by function"""
2862 2862
2863 2863 def __init__(self, fp, encode):
2864 2864 self._fp = fp
2865 2865 self._encode = encode
2866 2866
2867 2867 def close(self):
2868 2868 self._fp.close()
2869 2869
2870 2870 def flush(self):
2871 2871 self._fp.flush()
2872 2872
2873 2873 def write(self, data):
2874 2874 return self._fp.write(self._encode(data))
2875 2875
2876 2876
2877 2877 # Matches a single EOL which can either be a CRLF where repeated CR
2878 2878 # are removed or a LF. We do not care about old Macintosh files, so a
2879 2879 # stray CR is an error.
2880 2880 _eolre = remod.compile(br'\r*\n')
2881 2881
2882 2882
2883 2883 def tolf(s):
2884 2884 # type: (bytes) -> bytes
2885 2885 return _eolre.sub(b'\n', s)
2886 2886
2887 2887
2888 2888 def tocrlf(s):
2889 2889 # type: (bytes) -> bytes
2890 2890 return _eolre.sub(b'\r\n', s)
2891 2891
2892 2892
2893 2893 def _crlfwriter(fp):
2894 2894 return transformingwriter(fp, tocrlf)
2895 2895
2896 2896
2897 2897 if pycompat.oslinesep == b'\r\n':
2898 2898 tonativeeol = tocrlf
2899 2899 fromnativeeol = tolf
2900 2900 nativeeolwriter = _crlfwriter
2901 2901 else:
2902 2902 tonativeeol = pycompat.identity
2903 2903 fromnativeeol = pycompat.identity
2904 2904 nativeeolwriter = pycompat.identity
2905 2905
2906 2906
2907 2907 # TODO delete, since the workaround variant for Python 2 is no longer needed.
2908 2908 def iterfile(fp):
2909 2909 return fp
2910 2910
2911 2911
2912 2912 def iterlines(iterator):
2913 2913 # type: (Iterator[bytes]) -> Iterator[bytes]
2914 2914 for chunk in iterator:
2915 2915 for line in chunk.splitlines():
2916 2916 yield line
2917 2917
2918 2918
2919 2919 def expandpath(path):
2920 2920 # type: (bytes) -> bytes
2921 2921 return os.path.expanduser(os.path.expandvars(path))
2922 2922
2923 2923
2924 2924 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2925 2925 """Return the result of interpolating items in the mapping into string s.
2926 2926
2927 2927 prefix is a single character string, or a two character string with
2928 2928 a backslash as the first character if the prefix needs to be escaped in
2929 2929 a regular expression.
2930 2930
2931 2931 fn is an optional function that will be applied to the replacement text
2932 2932 just before replacement.
2933 2933
2934 2934 escape_prefix is an optional flag that allows using doubled prefix for
2935 2935 its escaping.
2936 2936 """
2937 2937 fn = fn or (lambda s: s)
2938 2938 patterns = b'|'.join(mapping.keys())
2939 2939 if escape_prefix:
2940 2940 patterns += b'|' + prefix
2941 2941 if len(prefix) > 1:
2942 2942 prefix_char = prefix[1:]
2943 2943 else:
2944 2944 prefix_char = prefix
2945 2945 mapping[prefix_char] = prefix_char
2946 2946 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2947 2947 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2948 2948
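# Worked example (editor's illustration): a b'$' prefix must be passed in
# its escaped two-character form, since it is spliced into a regexp.
#
#   interpolate(br'\$', {b'user': b'alice'}, b'hi $user')  ->  b'hi alice'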
2949 2949
2950 2950 timecount = unitcountfn(
2951 2951 (1, 1e3, _(b'%.0f s')),
2952 2952 (100, 1, _(b'%.1f s')),
2953 2953 (10, 1, _(b'%.2f s')),
2954 2954 (1, 1, _(b'%.3f s')),
2955 2955 (100, 0.001, _(b'%.1f ms')),
2956 2956 (10, 0.001, _(b'%.2f ms')),
2957 2957 (1, 0.001, _(b'%.3f ms')),
2958 2958 (100, 0.000001, _(b'%.1f us')),
2959 2959 (10, 0.000001, _(b'%.2f us')),
2960 2960 (1, 0.000001, _(b'%.3f us')),
2961 2961 (100, 0.000000001, _(b'%.1f ns')),
2962 2962 (10, 0.000000001, _(b'%.2f ns')),
2963 2963 (1, 0.000000001, _(b'%.3f ns')),
2964 2964 )
2965 2965
2966 2966
2967 2967 @attr.s
2968 2968 class timedcmstats:
2969 2969 """Stats information produced by the timedcm context manager on entering."""
2970 2970
2971 2971 # the starting value of the timer as a float (meaning and resolution are
2972 2972 # platform dependent, see util.timer)
2973 2973 start = attr.ib(default=attr.Factory(lambda: timer()))
2974 2974 # the number of seconds as a floating point value; starts at 0, updated when
2975 2975 # the context is exited.
2976 2976 elapsed = attr.ib(default=0)
2977 2977 # the number of nested timedcm context managers.
2978 2978 level = attr.ib(default=1)
2979 2979
2980 2980 def __bytes__(self):
2981 2981 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2982 2982
2983 2983 __str__ = encoding.strmethod(__bytes__)
2984 2984
2985 2985
2986 2986 @contextlib.contextmanager
2987 2987 def timedcm(whencefmt, *whenceargs):
2988 2988 """A context manager that produces timing information for a given context.
2989 2989
2990 2990 On entering a timedcmstats instance is produced.
2991 2991
2992 2992 This context manager is reentrant.
2993 2993
2994 2994 """
2995 2995 # track nested context managers
2996 2996 timedcm._nested += 1
2997 2997 timing_stats = timedcmstats(level=timedcm._nested)
2998 2998 try:
2999 2999 with tracing.log(whencefmt, *whenceargs):
3000 3000 yield timing_stats
3001 3001 finally:
3002 3002 timing_stats.elapsed = timer() - timing_stats.start
3003 3003 timedcm._nested -= 1
3004 3004
3005 3005
3006 3006 timedcm._nested = 0
3007 3007
3008 3008
3009 3009 def timed(func):
3010 3010 """Report the execution time of a function call to stderr.
3011 3011
3012 3012 During development, use as a decorator when you need to measure
3013 3013 the cost of a function, e.g. as follows:
3014 3014
3015 3015 @util.timed
3016 3016 def foo(a, b, c):
3017 3017 pass
3018 3018 """
3019 3019
3020 3020 def wrapper(*args, **kwargs):
3021 3021 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3022 3022 result = func(*args, **kwargs)
3023 3023 stderr = procutil.stderr
3024 3024 stderr.write(
3025 3025 b'%s%s: %s\n'
3026 3026 % (
3027 3027 b' ' * time_stats.level * 2,
3028 3028 pycompat.bytestr(func.__name__),
3029 3029 time_stats,
3030 3030 )
3031 3031 )
3032 3032 return result
3033 3033
3034 3034 return wrapper
3035 3035
3036 3036
3037 3037 _sizeunits = (
3038 3038 (b'm', 2 ** 20),
3039 3039 (b'k', 2 ** 10),
3040 3040 (b'g', 2 ** 30),
3041 3041 (b'kb', 2 ** 10),
3042 3042 (b'mb', 2 ** 20),
3043 3043 (b'gb', 2 ** 30),
3044 3044 (b'b', 1),
3045 3045 )
3046 3046
3047 3047
3048 3048 def sizetoint(s):
3049 3049 # type: (bytes) -> int
3050 3050 """Convert a space specifier to a byte count.
3051 3051
3052 3052 >>> sizetoint(b'30')
3053 3053 30
3054 3054 >>> sizetoint(b'2.2kb')
3055 3055 2252
3056 3056 >>> sizetoint(b'6M')
3057 3057 6291456
3058 3058 """
3059 3059 t = s.strip().lower()
3060 3060 try:
3061 3061 for k, u in _sizeunits:
3062 3062 if t.endswith(k):
3063 3063 return int(float(t[: -len(k)]) * u)
3064 3064 return int(t)
3065 3065 except ValueError:
3066 3066 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3067 3067
3068 3068
3069 3069 class hooks:
3070 3070 """A collection of hook functions that can be used to extend a
3071 3071 function's behavior. Hooks are called in lexicographic order,
3072 3072 based on the names of their sources."""
3073 3073
3074 3074 def __init__(self):
3075 3075 self._hooks = []
3076 3076
3077 3077 def add(self, source, hook):
3078 3078 self._hooks.append((source, hook))
3079 3079
3080 3080 def __call__(self, *args):
3081 3081 self._hooks.sort(key=lambda x: x[0])
3082 3082 results = []
3083 3083 for source, hook in self._hooks:
3084 3084 results.append(hook(*args))
3085 3085 return results
3086 3086
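# Usage sketch (editor's illustration): hooks run sorted by source name,
# not by registration order.
#
#   h = hooks()
#   h.add(b'zz-ext', lambda x: x + 1)
#   h.add(b'aa-ext', lambda x: x * 2)
#   h(3)  ->  [6, 4]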
3087 3087
3088 3088 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3089 3089 """Yields lines for a nicely formatted stacktrace.
3090 3090 Skips the 'skip' last entries, then returns the last 'depth' entries.
3091 3091 Each file+linenumber is formatted according to fileline.
3092 3092 Each line is formatted according to line.
3093 3093 If line is None, it yields:
3094 3094 length of longest filepath+line number,
3095 3095 filepath+linenumber,
3096 3096 function
3097 3097
3098 3098 Not to be used in production code, but very convenient while developing.
3099 3099 """
3100 3100 entries = [
3101 3101 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3102 3102 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3103 3103 ][-depth:]
3104 3104 if entries:
3105 3105 fnmax = max(len(entry[0]) for entry in entries)
3106 3106 for fnln, func in entries:
3107 3107 if line is None:
3108 3108 yield (fnmax, fnln, func)
3109 3109 else:
3110 3110 yield line % (fnmax, fnln, func)
3111 3111
3112 3112
3113 3113 def debugstacktrace(
3114 3114 msg=b'stacktrace',
3115 3115 skip=0,
3116 3116 f=procutil.stderr,
3117 3117 otherf=procutil.stdout,
3118 3118 depth=0,
3119 3119 prefix=b'',
3120 3120 ):
3121 3121 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3122 3122 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3123 3123 By default it will flush stdout first.
3124 3124 It can be used everywhere and intentionally does not require an ui object.
3125 3125 Not to be used in production code, but very convenient while developing.
3126 3126 """
3127 3127 if otherf:
3128 3128 otherf.flush()
3129 3129 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3130 3130 for line in getstackframes(skip + 1, depth=depth):
3131 3131 f.write(prefix + line)
3132 3132 f.flush()
3133 3133
3134 3134
3135 3135 # convenient shortcut
3136 3136 dst = debugstacktrace
3137 3137
3138 3138
3139 3139 def safename(f, tag, ctx, others=None):
3140 3140 """
3141 3141 Generate a name that is safe to rename f to in the given context.
3142 3142
3143 3143 f: filename to rename
3144 3144 tag: a string tag that will be included in the new name
3145 3145 ctx: a context, in which the new name must not exist
3146 3146 others: a set of other filenames that the new name must not be in
3147 3147
3148 3148 Returns a file name of the form oldname~tag[~number] which does not exist
3149 3149 in the provided context and is not in the set of other names.
3150 3150 """
3151 3151 if others is None:
3152 3152 others = set()
3153 3153
3154 3154 fn = b'%s~%s' % (f, tag)
3155 3155 if fn not in ctx and fn not in others:
3156 3156 return fn
3157 3157 for n in itertools.count(1):
3158 3158 fn = b'%s~%s~%s' % (f, tag, n)
3159 3159 if fn not in ctx and fn not in others:
3160 3160 return fn
3161 3161
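# Worked example (editor's illustration; assumes neither name exists in
# the hypothetical context ctx):
#
#   safename(b'foo', b'orig', ctx)                        ->  b'foo~orig'
#   safename(b'foo', b'orig', ctx, others={b'foo~orig'})  ->  b'foo~orig~1'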
3162 3162
3163 3163 def readexactly(stream, n):
3164 3164 '''read n bytes from stream.read and abort if less was available'''
3165 3165 s = stream.read(n)
3166 3166 if len(s) < n:
3167 3167 raise error.Abort(
3168 3168 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3169 3169 % (len(s), n)
3170 3170 )
3171 3171 return s
3172 3172
3173 3173
3174 3174 def uvarintencode(value):
3175 3175 """Encode an unsigned integer value to a varint.
3176 3176
3177 3177 A varint is a variable length integer of 1 or more bytes. Each byte
3178 3178 except the last has the most significant bit set. The lower 7 bits of
3179 3179 each byte store the value's binary representation, least significant group
3180 3180 first.
3181 3181
3182 3182 >>> uvarintencode(0)
3183 3183 '\\x00'
3184 3184 >>> uvarintencode(1)
3185 3185 '\\x01'
3186 3186 >>> uvarintencode(127)
3187 3187 '\\x7f'
3188 3188 >>> uvarintencode(1337)
3189 3189 '\\xb9\\n'
3190 3190 >>> uvarintencode(65536)
3191 3191 '\\x80\\x80\\x04'
3192 3192 >>> uvarintencode(-1)
3193 3193 Traceback (most recent call last):
3194 3194 ...
3195 3195 ProgrammingError: negative value for uvarint: -1
3196 3196 """
3197 3197 if value < 0:
3198 3198 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3199 3199 bits = value & 0x7F
3200 3200 value >>= 7
3201 3201 bytes = []
3202 3202 while value:
3203 3203 bytes.append(pycompat.bytechr(0x80 | bits))
3204 3204 bits = value & 0x7F
3205 3205 value >>= 7
3206 3206 bytes.append(pycompat.bytechr(bits))
3207 3207
3208 3208 return b''.join(bytes)
3209 3209
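# Worked example (editor's illustration): 1337 is 0b101_0011_1001.
#   low 7 bits  -> 0x39, more bits remain -> emit 0x80 | 0x39 = 0xb9
#   next 7 bits -> 0x0a, nothing remains  -> emit 0x0a
# giving b'\xb9\x0a', i.e. the b'\xb9\n' shown in the doctest above.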
3210 3210
3211 3211 def uvarintdecodestream(fh):
3212 3212 """Decode an unsigned variable length integer from a stream.
3213 3213
3214 3214 The passed argument is anything that has a ``.read(N)`` method.
3215 3215
3216 3216 >>> from io import BytesIO
3217 3217 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3218 3218 0
3219 3219 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3220 3220 1
3221 3221 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3222 3222 127
3223 3223 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3224 3224 1337
3225 3225 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3226 3226 65536
3227 3227 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3228 3228 Traceback (most recent call last):
3229 3229 ...
3230 3230 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3231 3231 """
3232 3232 result = 0
3233 3233 shift = 0
3234 3234 while True:
3235 3235 byte = ord(readexactly(fh, 1))
3236 3236 result |= (byte & 0x7F) << shift
3237 3237 if not (byte & 0x80):
3238 3238 return result
3239 3239 shift += 7
3240 3240
3241 3241
3242 3242 # Passing the '' locale means that the locale should be set according to the
3243 3243 # user settings (environment variables).
3244 3244 # Python sometimes avoids setting the global locale settings. When interfacing
3245 3245 # with C code (e.g. the curses module or the Subversion bindings), the global
3246 3246 # locale settings must be initialized correctly. Python 2 does not initialize
3247 3247 # the global locale settings on interpreter startup. Python 3 sometimes
3248 3248 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3249 3249 # explicitly initialize it to get consistent behavior if it's not already
3250 3250 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3251 3251 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3252 3252 # if we can remove this code.
3253 3253 @contextlib.contextmanager
3254 3254 def with_lc_ctype():
3255 3255 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3256 3256 if oldloc == 'C':
3257 3257 try:
3258 3258 try:
3259 3259 locale.setlocale(locale.LC_CTYPE, '')
3260 3260 except locale.Error:
3261 3261 # The likely case is that the locale from the environment
3262 3262 # variables is unknown.
3263 3263 pass
3264 3264 yield
3265 3265 finally:
3266 3266 locale.setlocale(locale.LC_CTYPE, oldloc)
3267 3267 else:
3268 3268 yield
3269 3269
3270 3270
3271 3271 def _estimatememory():
3272 3272 # type: () -> Optional[int]
3273 3273 """Provide an estimate for the available system memory in Bytes.
3274 3274
3275 3275 If no estimate can be provided on the platform, returns None.
3276 3276 """
3277 3277 if pycompat.sysplatform.startswith(b'win'):
3278 3278 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3279 3279 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3280 3280 # Structure, byref, sizeof and windll live in ctypes, not ctypes.wintypes
3281 3281 from ctypes import ( # pytype: disable=import-error
3282 3282 Structure,
3283 3283 byref,
3284 3284 sizeof,
3285 3285 windll,
3286 3286 )
3286 3286
3287 3287 class MEMORYSTATUSEX(Structure):
3288 3288 _fields_ = [
3289 3289 ('dwLength', DWORD),
3290 3290 ('dwMemoryLoad', DWORD),
3291 3291 ('ullTotalPhys', DWORDLONG),
3292 3292 ('ullAvailPhys', DWORDLONG),
3293 3293 ('ullTotalPageFile', DWORDLONG),
3294 3294 ('ullAvailPageFile', DWORDLONG),
3295 3295 ('ullTotalVirtual', DWORDLONG),
3296 3296 ('ullAvailVirtual', DWORDLONG),
3297 3297 ('ullExtendedVirtual', DWORDLONG),
3298 3298 ]
3299 3299
3300 3300 x = MEMORYSTATUSEX()
3301 3301 x.dwLength = sizeof(x)
3302 3302 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3303 3303 return x.ullAvailPhys
3304 3304
3305 3305 # On newer Unix-like systems and Mac OSX, the sysconf interface
3306 3306 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3307 3307 # seems to be implemented on most systems.
3308 3308 try:
3309 3309 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3310 3310 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3311 3311 return pagesize * pages
3312 3312 except OSError: # sysconf can fail
3313 3313 pass
3314 3314 except KeyError: # unknown parameter
3315 3315 pass