# util.py - Mercurial utility functions and platform specific implementations
#
# Copyright 2005 K. Thananchayan <thananck@yahoo.com>
# Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
# Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

"""Mercurial utility functions and platform specific implementations.

This contains helper routines that are independent of the SCM core and
hide platform-specific details from the core.
"""


import abc
import collections
import contextlib
import errno
import gc
import hashlib
import io
import itertools
import locale
import mmap
import os
import pickle  # provides util.pickle symbol
import re as remod
import shutil
import stat
import sys
import time
import traceback
import warnings

from typing import (
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
)

from .node import hex
from .thirdparty import attr
from .pycompat import (
    open,
)
from hgdemandimport import tracing
from . import (
    encoding,
    error,
    i18n,
    policy,
    pycompat,
    urllibcompat,
)
from .utils import (
    compression,
    hashutil,
    procutil,
    stringutil,
)

# keeps pyflakes happy
assert [
    Iterable,
    Iterator,
    List,
    Optional,
    Tuple,
]


base85 = policy.importmod('base85')
osutil = policy.importmod('osutil')

b85decode = base85.b85decode
b85encode = base85.b85encode

cookielib = pycompat.cookielib
httplib = pycompat.httplib
safehasattr = pycompat.safehasattr
socketserver = pycompat.socketserver
bytesio = io.BytesIO
# TODO deprecate stringio name, as it is a lie on Python 3.
stringio = bytesio
xmlrpclib = pycompat.xmlrpclib

httpserver = urllibcompat.httpserver
urlerr = urllibcompat.urlerr
urlreq = urllibcompat.urlreq

# workaround for win32mbcs
_filenamebytestr = pycompat.bytestr

if pycompat.iswindows:
    from . import windows as platform
else:
    from . import posix as platform

_ = i18n._

abspath = platform.abspath
bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
expandglobs = platform.expandglobs
getfsmountpoint = platform.getfsmountpoint
getfstype = platform.getfstype
get_password = platform.get_password
groupmembers = platform.groupmembers
groupname = platform.groupname
isexec = platform.isexec
isowner = platform.isowner
listdir = osutil.listdir
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
posixfile = platform.posixfile
readlink = platform.readlink
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setflags = platform.setflags
split = platform.split
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
umask = platform.umask
unlink = platform.unlink
username = platform.username


def setumask(val: int) -> None:
    '''updates the umask. used by chg server'''
    if pycompat.iswindows:
        return
    os.umask(val)
    global umask
    platform.umask = umask = val & 0o777


# small compat layer
compengines = compression.compengines
SERVERROLE = compression.SERVERROLE
CLIENTROLE = compression.CLIENTROLE

# Python compatibility

_notset = object()


def bitsfrom(container):
    bits = 0
    for bit in container:
        bits |= bit
    return bits


# Python 2.6 still has deprecation warnings enabled by default. We do not
# want to display anything to standard users, so we detect whether we are
# running tests and only use Python deprecation warnings in that case.
_dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
if _dowarn:
    # explicitly unfilter our warning for python 2.7
    #
    # The option of setting PYTHONWARNINGS in the test runner was
    # investigated. However, the module name set through PYTHONWARNINGS is
    # matched exactly, so we cannot set 'mercurial' and have it match e.g.
    # 'mercurial.scmutil'. This makes the whole PYTHONWARNINGS approach
    # useless for our use case.
    warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
    warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
if _dowarn:
    # silence warning emitted by passing user string to re.sub()
    warnings.filterwarnings(
        'ignore', 'bad escape', DeprecationWarning, 'mercurial'
    )
    warnings.filterwarnings(
        'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
    )
    # TODO: reinvent imp.is_frozen()
    warnings.filterwarnings(
        'ignore',
        'the imp module is deprecated',
        DeprecationWarning,
        'mercurial',
    )


def nouideprecwarn(msg, version, stacklevel=1):
    """Issue a Python-native deprecation warning.

    This is a no-op outside of tests; use 'ui.deprecwarn' when possible.
    """
    if _dowarn:
        msg += (
            b"\n(compatibility will be dropped after Mercurial-%s,"
            b" update your code.)"
        ) % version
        warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
        # on python 3 with chg, we will need to explicitly flush the output
        sys.stderr.flush()


DIGESTS = {
    b'md5': hashlib.md5,
    b'sha1': hashutil.sha1,
    b'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS


class digester:
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester([b'md5', b'sha1'])
    >>> d.update(b'foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d[b'md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d[b'sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred([b'md5', b'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=b''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise error.Abort(_(b'unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            raise error.Abort(_(b'unknown digest type: %s') % key)
        return hex(self._hashes[key].digest())

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None


class digestchecker:
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        if self._size != self._got:
            raise error.Abort(
                _(b'size mismatch: expected %d, got %d')
                % (self._size, self._got)
            )
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise error.Abort(
                    _(b'%s mismatch: expected %s, got %s')
                    % (k, v, self._digester[k])
                )

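# Illustrative sketch (not executed): validating content while it is being
# consumed; `fh`, `size` and `expected_sha1` are hypothetical:
#
#     wrapped = digestchecker(fh, size, {b'sha1': expected_sha1})
#     while wrapped.read(4096):
#         pass
#     wrapped.validate()  # raises error.Abort on size or digest mismatch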

try:
    buffer = buffer  # pytype: disable=name-error
except NameError:

    def buffer(sliceable, offset=0, length=None):
        if length is not None:
            return memoryview(sliceable)[offset : offset + length]
        return memoryview(sliceable)[offset:]


_chunksize = 4096


class bufferedinputpipe:
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class lets us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __new__(cls, fh):
        # If we receive a fileobjectproxy, we need to use a variation of this
        # class that notifies observers about activity.
        if isinstance(fh, fileobjectproxy):
            cls = observedbufferedinputpipe

        return super(bufferedinputpipe, cls).__new__(cls)

    def __init__(self, input):
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally as a pre-step for polling IO. If there
        is already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def unbufferedread(self, size):
        if not self._eof and self._lenbuf == 0:
            self._fillbuffer(max(size, _chunksize))
        return self._frombuffer(min(self._lenbuf, size))

    def readline(self, *args, **kwargs):
        if len(self._buffer) > 1:
            # this should not happen because both read and readline end with
            # a _frombuffer call that collapses it.
            self._buffer = [b''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find(b'\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find(b'\n')
        size = lfi + 1
        if lfi < 0:  # end of file
            size = self._lenbuf
        elif len(self._buffer) > 1:
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return b''
        buf = self._buffer[0]
        if len(self._buffer) > 1:
            buf = b''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data) :]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self, size=_chunksize):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), size)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)

        return data

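# Illustrative sketch (not executed): the buffer-aware polling loop this
# class exists for. `proc` is a hypothetical subprocess whose stdout is a
# pipe; `poll` is the platform poll re-exported above:
#
#     pipe = bufferedinputpipe(proc.stdout)
#     while not pipe.closed:
#         if not pipe.hasbuffer:
#             poll([pipe.fileno()])  # only block when nothing is buffered
#         line = pipe.readline()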

def mmapread(fp, size=None):
    if size == 0:
        # size of 0 to mmap.mmap() means "all data"
        # rather than "zero bytes", so special case that.
        return b''
    elif size is None:
        size = 0
    fd = getattr(fp, 'fileno', lambda: fp)()
    try:
        return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
    except ValueError:
        # Empty files cannot be mmapped, but mmapread should still work. Check
        # if the file is empty, and if so, return an empty buffer.
        if os.fstat(fd).st_size == 0:
            return b''
        raise

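# Illustrative sketch (not executed): mmapread() accepts a file object or a
# raw descriptor and returns b'' for empty files instead of raising:
#
#     with open(path, 'rb') as fp:     # `path` is hypothetical
#         data = mmapread(fp)          # map the whole file
#         head = mmapread(fp, 4096)    # map only the first 4096 bytes
#                                      # (file must be at least that long)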

class fileobjectproxy:
    """A proxy around file objects that tells a watcher when events occur.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, fh, observer):
        object.__setattr__(self, '_orig', fh)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        ours = {
            '_observer',
            # IOBase
            'close',
            # closed is a property
            'fileno',
            'flush',
            'isatty',
            'readable',
            'readline',
            'readlines',
            'seek',
            'seekable',
            'tell',
            'truncate',
            'writable',
            'writelines',
            # RawIOBase
            'read',
            'readall',
            'readinto',
            'write',
            # BufferedIOBase
            # raw is a property
            'detach',
            # read defined above
            'read1',
            # readinto defined above
            # write defined above
        }

        # We only observe some methods.
        if name in ours:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __iter__(self):
        return object.__getattribute__(self, '_orig').__iter__()

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def close(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'close', *args, **kwargs
        )

    def fileno(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'fileno', *args, **kwargs
        )

    def flush(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'flush', *args, **kwargs
        )

    def isatty(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'isatty', *args, **kwargs
        )

    def readable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readable', *args, **kwargs
        )

    def readline(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readline', *args, **kwargs
        )

    def readlines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readlines', *args, **kwargs
        )

    def seek(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seek', *args, **kwargs
        )

    def seekable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'seekable', *args, **kwargs
        )

    def tell(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'tell', *args, **kwargs
        )

    def truncate(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'truncate', *args, **kwargs
        )

    def writable(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writable', *args, **kwargs
        )

    def writelines(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'writelines', *args, **kwargs
        )

    def read(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read', *args, **kwargs
        )

    def readall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readall', *args, **kwargs
        )

    def readinto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'readinto', *args, **kwargs
        )

    def write(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'write', *args, **kwargs
        )

    def detach(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'detach', *args, **kwargs
        )

    def read1(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'read1', *args, **kwargs
        )


class observedbufferedinputpipe(bufferedinputpipe):
    """A variation of bufferedinputpipe that is aware of fileobjectproxy.

    ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
    bypass ``fileobjectproxy``. Because of this, we need to make
    ``bufferedinputpipe`` aware of these operations.

    This variation of ``bufferedinputpipe`` can notify observers about
    ``os.read()`` events. It also re-publishes other events, such as
    ``read()`` and ``readline()``.
    """

    def _fillbuffer(self, size=_chunksize):
        res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)

        fn = getattr(self._input._observer, 'osread', None)
        if fn:
            fn(res, size)

        return res

    # We use different observer methods because the operation isn't
    # performed on the actual file object but on us.
    def read(self, size):
        res = super(observedbufferedinputpipe, self).read(size)

        fn = getattr(self._input._observer, 'bufferedread', None)
        if fn:
            fn(res, size)

        return res

    def readline(self, *args, **kwargs):
        res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)

        fn = getattr(self._input._observer, 'bufferedreadline', None)
        if fn:
            fn(res)

        return res


PROXIED_SOCKET_METHODS = {
    'makefile',
    'recv',
    'recvfrom',
    'recvfrom_into',
    'recv_into',
    'send',
    'sendall',
    'sendto',
    'setblocking',
    'settimeout',
    'gettimeout',
    'setsockopt',
}


class socketproxy:
    """A proxy around a socket that tells a watcher when events occur.

    This is like ``fileobjectproxy`` except for sockets.

    This type is intended to only be used for testing purposes. Think hard
    before using it in important code.
    """

    __slots__ = (
        '_orig',
        '_observer',
    )

    def __init__(self, sock, observer):
        object.__setattr__(self, '_orig', sock)
        object.__setattr__(self, '_observer', observer)

    def __getattribute__(self, name):
        if name in PROXIED_SOCKET_METHODS:
            return object.__getattribute__(self, name)

        return getattr(object.__getattribute__(self, '_orig'), name)

    def __delattr__(self, name):
        return delattr(object.__getattribute__(self, '_orig'), name)

    def __setattr__(self, name, value):
        return setattr(object.__getattribute__(self, '_orig'), name, value)

    def __nonzero__(self):
        return bool(object.__getattribute__(self, '_orig'))

    __bool__ = __nonzero__

    def _observedcall(self, name, *args, **kwargs):
        # Call the original object.
        orig = object.__getattribute__(self, '_orig')
        res = getattr(orig, name)(*args, **kwargs)

        # Call a method on the observer of the same name with arguments
        # so it can react, log, etc.
        observer = object.__getattribute__(self, '_observer')
        fn = getattr(observer, name, None)
        if fn:
            fn(res, *args, **kwargs)

        return res

    def makefile(self, *args, **kwargs):
        res = object.__getattribute__(self, '_observedcall')(
            'makefile', *args, **kwargs
        )

        # The file object may be used for I/O. So we turn it into a
        # proxy using our observer.
        observer = object.__getattribute__(self, '_observer')
        return makeloggingfileobject(
            observer.fh,
            res,
            observer.name,
            reads=observer.reads,
            writes=observer.writes,
            logdata=observer.logdata,
            logdataapis=observer.logdataapis,
        )

    def recv(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv', *args, **kwargs
        )

    def recvfrom(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom', *args, **kwargs
        )

    def recvfrom_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recvfrom_into', *args, **kwargs
        )

    def recv_into(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'recv_into', *args, **kwargs
        )

    def send(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'send', *args, **kwargs
        )

    def sendall(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendall', *args, **kwargs
        )

    def sendto(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'sendto', *args, **kwargs
        )

    def setblocking(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setblocking', *args, **kwargs
        )

    def settimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'settimeout', *args, **kwargs
        )

    def gettimeout(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'gettimeout', *args, **kwargs
        )

    def setsockopt(self, *args, **kwargs):
        return object.__getattribute__(self, '_observedcall')(
            'setsockopt', *args, **kwargs
        )


class baseproxyobserver:
    def __init__(self, fh, name, logdata, logdataapis):
        self.fh = fh
        self.name = name
        self.logdata = logdata
        self.logdataapis = logdataapis

    def _writedata(self, data):
        if not self.logdata:
            if self.logdataapis:
                self.fh.write(b'\n')
                self.fh.flush()
            return

        # Simple case writes all data on a single line.
        if b'\n' not in data:
            if self.logdataapis:
                self.fh.write(b': %s\n' % stringutil.escapestr(data))
            else:
                self.fh.write(
                    b'%s> %s\n' % (self.name, stringutil.escapestr(data))
                )
            self.fh.flush()
            return

        # Data with newlines is written to multiple lines.
        if self.logdataapis:
            self.fh.write(b':\n')

        lines = data.splitlines(True)
        for line in lines:
            self.fh.write(
                b'%s> %s\n' % (self.name, stringutil.escapestr(line))
            )
        self.fh.flush()


class fileobjectobserver(baseproxyobserver):
    """Logs file object activity."""

    def __init__(
        self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
    ):
        super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes

    def read(self, res, size=-1):
        if not self.reads:
            return
        # Python 3 can return None from reads at EOF instead of empty strings.
        if res is None:
            res = b''

        if size == -1 and res == b'':
            # Suppress pointless read(-1) calls that return
            # nothing. These happen _a lot_ on Python 3, and there
            # doesn't seem to be a better workaround to have matching
            # Python 2 and 3 behavior. :(
            return

        if self.logdataapis:
            self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))

        self._writedata(res)

    def readline(self, res, limit=-1):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))

        self._writedata(res)

    def readinto(self, res, dest):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
            )

        data = dest[0:res] if res is not None else b''

        # _writedata() uses "in" operator and is confused by memoryview because
        # characters are ints on Python 3.
        if isinstance(data, memoryview):
            data = data.tobytes()

        self._writedata(data)

    def write(self, res, data):
        if not self.writes:
            return

        # Python 2 returns None from some write() calls. Python 3 (reasonably)
        # returns the integer bytes written.
        if res is None and data:
            res = len(data)

        if self.logdataapis:
            self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))

        self._writedata(data)

    def flush(self, res):
        if not self.writes:
            return

        self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))

    # For observedbufferedinputpipe.
    def bufferedread(self, res, size):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
            )

        self._writedata(res)

    def bufferedreadline(self, res):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> bufferedreadline() -> %d' % (self.name, len(res))
            )

        self._writedata(res)


def makeloggingfileobject(
    logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
):
    """Turn a file object into a logging file object."""

    observer = fileobjectobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return fileobjectproxy(fh, observer)

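# Illustrative sketch (not executed): mirroring I/O on `fh` to a log file
# handle; `fh` and `logfh` are hypothetical open binary files:
#
#     proxied = makeloggingfileobject(logfh, fh, b'myfile', logdata=True)
#     proxied.write(b'data')  # logs b"myfile> write(4) -> 4: data"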

class socketobserver(baseproxyobserver):
    """Logs socket activity."""

    def __init__(
        self,
        fh,
        name,
        reads=True,
        writes=True,
        states=True,
        logdata=False,
        logdataapis=True,
    ):
        super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
        self.reads = reads
        self.writes = writes
        self.states = states

    def makefile(self, res, mode=None, bufsize=None):
        if not self.states:
            return

        self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))

    def recv(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
            )
        self._writedata(res)

    def recvfrom(self, res, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom(%d, %d) -> %d'
                % (self.name, size, flags, len(res[0]))
            )

        self._writedata(res[0])

    def recvfrom_into(self, res, buf, size, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recvfrom_into(%d, %d) -> %d'
                % (self.name, size, flags, res[0])
            )

        self._writedata(buf[0 : res[0]])

    def recv_into(self, res, buf, size=0, flags=0):
        if not self.reads:
            return

        if self.logdataapis:
            self.fh.write(
                b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
            )

        self._writedata(buf[0:res])

    def send(self, res, data, flags=0):
        if not self.writes:
            return

        self.fh.write(
            b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
        )
        self._writedata(data)

    def sendall(self, res, data, flags=0):
        if not self.writes:
            return

        if self.logdataapis:
            # Returns None on success. So don't bother reporting return value.
            self.fh.write(
                b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
            )

        self._writedata(data)

    def sendto(self, res, data, flagsoraddress, address=None):
        if not self.writes:
            return

        if address:
            flags = flagsoraddress
        else:
            flags = 0

        if self.logdataapis:
            self.fh.write(
                b'%s> sendto(%d, %d, %r) -> %d'
                % (self.name, len(data), flags, address, res)
            )

        self._writedata(data)

    def setblocking(self, res, flag):
        if not self.states:
            return

        self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))

    def settimeout(self, res, value):
        if not self.states:
            return

        self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))

    def gettimeout(self, res):
        if not self.states:
            return

        self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))

    def setsockopt(self, res, level, optname, value):
        if not self.states:
            return

        self.fh.write(
            b'%s> setsockopt(%r, %r, %r) -> %r\n'
            % (self.name, level, optname, value, res)
        )


def makeloggingsocket(
    logh,
    fh,
    name,
    reads=True,
    writes=True,
    states=True,
    logdata=False,
    logdataapis=True,
):
    """Turn a socket into a logging socket."""

    observer = socketobserver(
        logh,
        name,
        reads=reads,
        writes=writes,
        states=states,
        logdata=logdata,
        logdataapis=logdataapis,
    )
    return socketproxy(fh, observer)


def version():
    """Return version information if available."""
    try:
        from . import __version__

        return __version__.version
    except ImportError:
        return b'unknown'


def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = b'3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = b'3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = b'3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = b'3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')

    >>> versiontuple(b'4.6rc0')
    (4, 6, None, 'rc0')
    >>> versiontuple(b'4.6rc0+12-425d55e54f98')
    (4, 6, None, 'rc0+12-425d55e54f98')
    >>> versiontuple(b'.1.2.3')
    (None, None, None, '.1.2.3')
    >>> versiontuple(b'12.34..5')
    (12, 34, None, '..5')
    >>> versiontuple(b'1.2.3.4.5.6')
    (1, 2, 3, '.4.5.6')
    """
    if not v:
        v = version()
    m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
    if not m:
        vparts, extra = b'', v
    elif m.group(2):
        vparts, extra = m.groups()
    else:
        vparts, extra = m.group(1), None

    assert vparts is not None  # help pytype

    vints = []
    for i in vparts.split(b'.'):
        try:
            vints.append(int(i))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)

    raise error.ProgrammingError(b"invalid version part request: %d" % n)


def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keyword args
    if func.__code__.co_argcount == 0:
        listcache = []

        def f():
            if len(listcache) == 0:
                listcache.append(func())
            return listcache[0]

        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f

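# Illustrative sketch (not executed): memoizing a pure function; only
# positional arguments form the cache key. `compute` is hypothetical:
#
#     @cachefunc
#     def expensive(rev):
#         return compute(rev)
#
#     expensive(42)  # computed once
#     expensive(42)  # served from the cache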

class cow:
    """helper class to make copy-on-write easier

    Call preparewrite before doing any writes.
    """

    def preparewrite(self):
        """call this before writes, return self or a copied new object"""
        if getattr(self, '_copied', 0):
            self._copied -= 1
            # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
            return self.__class__(self)  # pytype: disable=wrong-arg-count
        return self

    def copy(self):
        """always do a cheap copy"""
        self._copied = getattr(self, '_copied', 0) + 1
        return self


class sortdict(collections.OrderedDict):
    """a simple sorted dictionary

    >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
    >>> d2 = d1.copy()
    >>> list(d2.items())
    [('a', 0), ('b', 1)]
    >>> d2.update([(b'a', 2)])
    >>> list(d2.keys()) # should still be in last-set order
    ['b', 'a']
    >>> d1.insert(1, b'a.5', 0.5)
    >>> list(d1.items())
    [('a', 0), ('a.5', 0.5), ('b', 1)]
    """

    def __setitem__(self, key, value):
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)

    if pycompat.ispypy:
        # __setitem__() isn't called as of PyPy 5.8.0
        def update(self, src, **f):
            if isinstance(src, dict):
                src = src.items()
            for k, v in src:
                self[k] = v
            for k in f:
                self[k] = f[k]

    def insert(self, position, key, value):
        for (i, (k, v)) in enumerate(list(self.items())):
            if i == position:
                self[key] = value
            if i >= position:
                del self[k]
                self[k] = v


class cowdict(cow, dict):
    """copy-on-write dict

    Be sure to call d = d.preparewrite() before writing to d.

    >>> a = cowdict()
    >>> a is a.preparewrite()
    True
    >>> b = a.copy()
    >>> b is a
    True
    >>> c = b.copy()
    >>> c is a
    True
    >>> a = a.preparewrite()
    >>> b is a
    False
    >>> a is a.preparewrite()
    True
    >>> c = c.preparewrite()
    >>> b is c
    False
    >>> b is b.preparewrite()
    True
    """


class cowsortdict(cow, sortdict):
    """copy-on-write sortdict

    Be sure to call d = d.preparewrite() before writing to d.
    """


class transactional:  # pytype: disable=ignored-metaclass
    """Base class for making a transactional type into a context manager."""

    __metaclass__ = abc.ABCMeta

    @abc.abstractmethod
    def close(self):
        """Successfully closes the transaction."""

    @abc.abstractmethod
    def release(self):
        """Marks the end of the transaction.

        If the transaction has not been closed, it will be aborted.
        """

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        try:
            if exc_type is None:
                self.close()
        finally:
            self.release()


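# Illustrative sketch (not executed): a minimal subclass gets context
# manager behavior for free; __exit__ calls close() only on success and
# always calls release():
#
#     class mytxn(transactional):
#         def close(self):
#             pass  # commit here
#
#         def release(self):
#             pass  # abort here if close() never ran
#
#     with mytxn():
#         pass  # transactional work

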
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        tr.close()
        raise
    finally:
        tr.release()

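# Illustrative sketch (not executed): `tr` and `do_merge` are hypothetical.
# InterventionRequired still closes the transaction before propagating;
# any other exception leaves it unclosed, so release() aborts it:
#
#     with acceptintervention(tr):
#         do_merge()
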

@contextlib.contextmanager
def nullcontextmanager(enter_result=None):
    yield enter_result


class _lrucachenode:
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """

    __slots__ = ('next', 'prev', 'key', 'value', 'cost')

    def __init__(self):
        self.next = self
        self.prev = self

        self.key = _notset
        self.value = None
        self.cost = 0

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
        self.value = None
        self.cost = 0


class lrucachedict:
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.

    Items in the cache can be inserted with an optional "cost" value. This is
    simply an integer that is specified by the caller. The cache can be queried
    for the total cost of all items presently in the cache.

    The cache can also define a maximum cost. If a cache insertion would
    cause the total cost of the cache to go beyond the maximum cost limit,
    nodes will be evicted to make room for the new item. This can be used
    to e.g. set a max memory limit and associate an estimated bytes size
    cost to each item in the cache. By default, no maximum cost is enforced.
    """

    def __init__(self, max, maxcost=0):
        self._cache = {}

        self._head = _lrucachenode()
        self._size = 1
        self.capacity = max
        self.totalcost = 0
        self.maxcost = maxcost

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def insert(self, k, v, cost=0):
        """Insert a new item in the cache with optional cost value."""
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            self.totalcost -= node.cost
            node.value = v
            node.cost = cost
            self.totalcost += cost
            self._movetohead(node)

            if self.maxcost:
                self._enforcecostlimit()

            return

        if self._size < self.capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            self.totalcost -= node.cost
            del self._cache[node.key]

        node.key = k
        node.value = v
        node.cost = cost
        self.totalcost += cost
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

        if self.maxcost:
            self._enforcecostlimit()

    def __setitem__(self, k, v):
        self.insert(k, v)

    def __delitem__(self, k):
        self.pop(k)

    def pop(self, k, default=_notset):
        try:
            node = self._cache.pop(k)
        except KeyError:
            if default is _notset:
                raise
            return default

        value = node.value
        self.totalcost -= node.cost
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

        return value

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def peek(self, k, default=_notset):
        """Get the specified item without moving it to the head

        Unlike get(), this doesn't mutate the internal state. But be aware
        that it doesn't mean peek() is thread safe.
        """
        try:
            node = self._cache[k]
            return node.value
        except KeyError:
            if default is _notset:
                raise
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            self.totalcost -= n.cost
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self, capacity=None, maxcost=0):
        """Create a new cache as a copy of the current one.

        By default, the new cache has the same capacity as the existing one.
        But, the cache capacity can be changed as part of performing the
        copy.

        Items in the copy have an insertion/access order matching this
        instance.
        """

        capacity = capacity or self.capacity
        maxcost = maxcost or self.maxcost
        result = lrucachedict(capacity, maxcost=maxcost)

        # We copy entries by iterating in oldest-to-newest order so the copy
        # has the correct ordering.

        # Find the first non-empty entry.
        n = self._head.prev
        while n.key is _notset and n is not self._head:
            n = n.prev

        # We could potentially skip the first N items when decreasing capacity.
        # But let's keep it simple unless it is a performance problem.
        for i in range(len(self._cache)):
            result.insert(n.key, n.value, cost=n.cost)
            n = n.prev

        return result

    def popoldest(self):
        """Remove the oldest item from the cache.

        Returns the (key, value) describing the removed cache entry.
        """
        if not self._cache:
            return

        # Walk the linked list backwards starting at tail node until we hit
        # a non-empty node.
        n = self._head.prev

        while n.key is _notset:
            n = n.prev

        key, value = n.key, n.value

        # And remove it from the cache and mark it as empty.
        del self._cache[n.key]
        self.totalcost -= n.cost
        n.markempty()

        return key, value

    def _movetohead(self, node: _lrucachenode):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self) -> _lrucachenode:
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node

    def _enforcecostlimit(self):
        # This should run after an insertion. It should only be called if total
        # cost limits are being enforced.
        # The most recently inserted node is never evicted.
        if len(self) <= 1 or self.totalcost <= self.maxcost:
            return

        # This is logically equivalent to calling popoldest() until we
        # free up enough cost. We don't do that since popoldest() needs
        # to walk the linked list and doing this in a loop would be
        # quadratic. So we find the first non-empty node and then
        # walk nodes until we free up enough capacity.
        #
        # If we only removed the minimum number of nodes to free enough
        # cost at insert time, chances are high that the next insert would
        # also require pruning. This would effectively constitute quadratic
        # behavior for insert-heavy workloads. To mitigate this, we set a
        # target cost that is a percentage of the max cost. This will tend
        # to free more nodes when the high water mark is reached, which
        # lowers the chances of needing to prune on the subsequent insert.
        targetcost = int(self.maxcost * 0.75)

        n = self._head.prev
        while n.key is _notset:
            n = n.prev

        while len(self) > 1 and self.totalcost > targetcost:
            del self._cache[n.key]
            self.totalcost -= n.cost
            n.markempty()
            n = n.prev

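# Illustrative sketch (not executed): a two-entry lrucachedict with a cost
# ceiling; exceeding the ceiling evicts from the oldest end:
#
#     d = lrucachedict(2, maxcost=100)
#     d.insert(b'a', b'value-a', cost=60)
#     d.insert(b'b', b'value-b', cost=60)  # total cost 120 > 100: b'a' goes
#     d[b'b']          # lookup marks b'b' most recently used
#     d.popoldest()    # removes and returns the oldest (key, value)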

def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:

        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]

    else:

        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f


class propertycache:
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        result = self.func(obj)
        self.cachevalue(obj, result)
        return result

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value

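# Illustrative sketch (not executed): the decorated function runs once per
# instance; the cached result then shadows the descriptor via __dict__.
# `compute` is hypothetical:
#
#     class repoinfo:
#         @propertycache
#         def expensive(self):
#             return compute()
#
#     info = repoinfo()
#     info.expensive                            # computed and cached
#     info.expensive                            # plain attribute lookup
#     clearcachedproperty(info, b'expensive')   # drop it (defined below)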

def clearcachedproperty(obj, prop):
    '''clear a cached property value, if one has been set'''
    prop = pycompat.sysstr(prop)
    if prop in obj.__dict__:
        del obj.__dict__[prop]


def increasingchunks(source, min=1024, max=65536):
    """return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max"""

    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield b''.join(buf)
            blen = 0
            buf = []
    if buf:
        yield b''.join(buf)

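# Illustrative sketch (not executed): coalescing many small chunks into
# progressively larger ones; with 512-byte inputs the yielded chunks grow
# from 1024 bytes, doubling until the 65536 cap. `write` is hypothetical:
#
#     chunks = (b'x' * 512 for _ in range(1000))
#     for chunk in increasingchunks(chunks):
#         write(chunk)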

def always(fn):
    return True


def never(fn):
    return False


def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking
    has no effect on when GCs are triggered, only on what objects the GC
    looks into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue has been fixed in 2.7. But it still affects
    CPython's performance.
    """

    def wrapper(*args, **kwargs):
        gcenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if gcenabled:
                gc.enable()

    return wrapper

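# Illustrative sketch (not executed): `entries` is hypothetical. GC stays
# disabled only while the decorated function runs:
#
#     @nogc
#     def buildmap(entries):
#         return {e.key: e for e in entries}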

if pycompat.ispypy:
    # PyPy runs slower with gc disabled
    nogc = lambda x: x


1846 1846 def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
1847 1847 """return the relative path from one place to another.
1848 1848 root should use os.sep to separate directories
1849 1849 n1 should use os.sep to separate directories
1850 1850 n2 should use "/" to separate directories
1851 1851 returns an os.sep-separated path.
1852 1852
1853 1853 If n1 is a relative path, it's assumed it's
1854 1854 relative to root.
1855 1855 n2 should always be relative to root.
1856 1856 """
1857 1857 if not n1:
1858 1858 return localpath(n2)
1859 1859 if os.path.isabs(n1):
1860 1860 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1861 1861 return os.path.join(root, localpath(n2))
1862 1862 n2 = b'/'.join((pconvert(root), n2))
1863 1863 a, b = splitpath(n1), n2.split(b'/')
1864 1864 a.reverse()
1865 1865 b.reverse()
1866 1866 while a and b and a[-1] == b[-1]:
1867 1867 a.pop()
1868 1868 b.pop()
1869 1869 b.reverse()
1870 1870 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1871 1871
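# Worked example (POSIX-style separators), traced through the loop above:
#
#     pathto(b'/repo', b'/repo/a/b', b'a/c/x')   # -> b'../c/x'
#
# The common prefix b'/repo/a' is popped from both sides; one b'..' is
# emitted for the remaining component of n1, then b'c/x' is appended.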
1872 1872
1873 1873 def checksignature(func, depth=1):
1874 1874 '''wrap a function with code to check for calling errors'''
1875 1875
1876 1876 def check(*args, **kwargs):
1877 1877 try:
1878 1878 return func(*args, **kwargs)
1879 1879 except TypeError:
1880 1880 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1881 1881 raise error.SignatureError
1882 1882 raise
1883 1883
1884 1884 return check
1885 1885
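# Sketch: a wrong-arity call surfaces as error.SignatureError instead of
# the bare TypeError raised at the wrapped call site:
#
#     add = checksignature(lambda a, b: a + b)
#     add(1, 2)   # -> 3
#     add(1)      # raises error.SignatureError (TypeError at depth 1)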
1886 1886
1887 1887 # a whitelist of known filesystems where hardlinks work reliably
1888 1888 _hardlinkfswhitelist = {
1889 1889 b'apfs',
1890 1890 b'btrfs',
1891 1891 b'ext2',
1892 1892 b'ext3',
1893 1893 b'ext4',
1894 1894 b'hfs',
1895 1895 b'jfs',
1896 1896 b'NTFS',
1897 1897 b'reiserfs',
1898 1898 b'tmpfs',
1899 1899 b'ufs',
1900 1900 b'xfs',
1901 1901 b'zfs',
1902 1902 }
1903 1903
1904 1904
1905 1905 def copyfile(
1906 1906 src,
1907 1907 dest,
1908 1908 hardlink=False,
1909 1909 copystat=False,
1910 1910 checkambig=False,
1911 1911 nb_bytes=None,
1912 1912 no_hardlink_cb=None,
1913 1913 check_fs_hardlink=True,
1914 1914 ):
1915 1915 """copy a file, preserving mode and optionally other stat info like
1916 1916 atime/mtime
1917 1917
1918 1918 checkambig argument is used with filestat, and is useful only if
1919 1919 destination file is guarded by any lock (e.g. repo.lock or
1920 1920 repo.wlock).
1921 1921
1922 1922 copystat and checkambig should be exclusive.
1923 1923
1924 1924 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1925 1925 """
1926 1926 assert not (copystat and checkambig)
1927 1927 oldstat = None
1928 1928 if os.path.lexists(dest):
1929 1929 if checkambig:
1930 1930 oldstat = checkambig and filestat.frompath(dest)
1931 1931 unlink(dest)
1932 1932 if hardlink and check_fs_hardlink:
1933 1933 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1934 1934 # unless we are confident that dest is on a whitelisted filesystem.
1935 1935 try:
1936 1936 fstype = getfstype(os.path.dirname(dest))
1937 1937 except OSError:
1938 1938 fstype = None
1939 1939 if fstype not in _hardlinkfswhitelist:
1940 1940 if no_hardlink_cb is not None:
1941 1941 no_hardlink_cb()
1942 1942 hardlink = False
1943 1943 if hardlink:
1944 1944 try:
1945 1945 oslink(src, dest)
1946 1946 if nb_bytes is not None:
1947 1947 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1948 1948 raise error.ProgrammingError(m)
1949 1949 return
1950 1950 except (IOError, OSError) as exc:
1951 1951 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1952 1952 no_hardlink_cb()
1953 1953 # fall back to normal copy
1954 1954 if os.path.islink(src):
1955 1955 os.symlink(os.readlink(src), dest)
1956 1956 # copytime is ignored for symlinks, but in general copytime isn't needed
1957 1957 # for them anyway
1958 1958 if nb_bytes is not None:
1959 1959 m = "cannot use `nb_bytes` on a symlink"
1960 1960 raise error.ProgrammingError(m)
1961 1961 else:
1962 1962 try:
1963 1963 shutil.copyfile(src, dest)
1964 1964 if copystat:
1965 1965 # copystat also copies mode
1966 1966 shutil.copystat(src, dest)
1967 1967 else:
1968 1968 shutil.copymode(src, dest)
1969 1969 if oldstat and oldstat.stat:
1970 1970 newstat = filestat.frompath(dest)
1971 1971 if newstat.isambig(oldstat):
1972 1972 # stat of copied file is ambiguous to original one
1973 1973 advanced = (
1974 1974 oldstat.stat[stat.ST_MTIME] + 1
1975 1975 ) & 0x7FFFFFFF
1976 1976 os.utime(dest, (advanced, advanced))
1977 1977 # We could do something smarter using `copy_file_range` call or similar
1978 1978 if nb_bytes is not None:
1979 1979 with open(dest, mode='r+') as f:
1980 1980 f.truncate(nb_bytes)
1981 1981 except shutil.Error as inst:
1982 1982 raise error.Abort(stringutil.forcebytestr(inst))
1983 1983
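# Usage sketch (paths and the callback are illustrative only): attempt a
# hardlink and get notified when the copy fallback is taken:
#
#     def _nolink():
#         ui.note(b'hardlink not possible, copying\n')   # hypothetical ui
#
#     copyfile(b'src.bin', b'dst.bin', hardlink=True, no_hardlink_cb=_nolink)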
1984 1984
1985 1985 def copyfiles(src, dst, hardlink=None, progress=None):
1986 1986 """Copy a directory tree using hardlinks if possible."""
1987 1987 num = 0
1988 1988
1989 1989 def settopic():
1990 1990 if progress:
1991 1991 progress.topic = _(b'linking') if hardlink else _(b'copying')
1992 1992
1993 1993 if os.path.isdir(src):
1994 1994 if hardlink is None:
1995 1995 hardlink = (
1996 1996 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1997 1997 )
1998 1998 settopic()
1999 1999 os.mkdir(dst)
2000 2000 for name, kind in listdir(src):
2001 2001 srcname = os.path.join(src, name)
2002 2002 dstname = os.path.join(dst, name)
2003 2003 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2004 2004 num += n
2005 2005 else:
2006 2006 if hardlink is None:
2007 2007 hardlink = (
2008 2008 os.stat(os.path.dirname(src)).st_dev
2009 2009 == os.stat(os.path.dirname(dst)).st_dev
2010 2010 )
2011 2011 settopic()
2012 2012
2013 2013 if hardlink:
2014 2014 try:
2015 2015 oslink(src, dst)
2016 2016 except (IOError, OSError) as exc:
2017 2017 if exc.errno != errno.EEXIST:
2018 2018 hardlink = False
2019 2019 # XXX maybe try to relink if the file exists?
2020 2020 shutil.copy(src, dst)
2021 2021 else:
2022 2022 shutil.copy(src, dst)
2023 2023 num += 1
2024 2024 if progress:
2025 2025 progress.increment()
2026 2026
2027 2027 return hardlink, num
2028 2028
2029 2029
2030 2030 _winreservednames = {
2031 2031 b'con',
2032 2032 b'prn',
2033 2033 b'aux',
2034 2034 b'nul',
2035 2035 b'com1',
2036 2036 b'com2',
2037 2037 b'com3',
2038 2038 b'com4',
2039 2039 b'com5',
2040 2040 b'com6',
2041 2041 b'com7',
2042 2042 b'com8',
2043 2043 b'com9',
2044 2044 b'lpt1',
2045 2045 b'lpt2',
2046 2046 b'lpt3',
2047 2047 b'lpt4',
2048 2048 b'lpt5',
2049 2049 b'lpt6',
2050 2050 b'lpt7',
2051 2051 b'lpt8',
2052 2052 b'lpt9',
2053 2053 }
2054 2054 _winreservedchars = b':*?"<>|'
2055 2055
2056 2056
2057 2057 def checkwinfilename(path: bytes) -> Optional[bytes]:
2058 2058 r"""Check that the base-relative path is a valid filename on Windows.
2059 2059 Returns None if the path is ok, or a UI string describing the problem.
2060 2060
2061 2061 >>> checkwinfilename(b"just/a/normal/path")
2062 2062 >>> checkwinfilename(b"foo/bar/con.xml")
2063 2063 "filename contains 'con', which is reserved on Windows"
2064 2064 >>> checkwinfilename(b"foo/con.xml/bar")
2065 2065 "filename contains 'con', which is reserved on Windows"
2066 2066 >>> checkwinfilename(b"foo/bar/xml.con")
2067 2067 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2068 2068 "filename contains 'AUX', which is reserved on Windows"
2069 2069 >>> checkwinfilename(b"foo/bar/bla:.txt")
2070 2070 "filename contains ':', which is reserved on Windows"
2071 2071 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2072 2072 "filename contains '\\x07', which is invalid on Windows"
2073 2073 >>> checkwinfilename(b"foo/bar/bla ")
2074 2074 "filename ends with ' ', which is not allowed on Windows"
2075 2075 >>> checkwinfilename(b"../bar")
2076 2076 >>> checkwinfilename(b"foo\\")
2077 2077 "filename ends with '\\', which is invalid on Windows"
2078 2078 >>> checkwinfilename(b"foo\\/bar")
2079 2079 "directory name ends with '\\', which is invalid on Windows"
2080 2080 """
2081 2081 if path.endswith(b'\\'):
2082 2082 return _(b"filename ends with '\\', which is invalid on Windows")
2083 2083 if b'\\/' in path:
2084 2084 return _(b"directory name ends with '\\', which is invalid on Windows")
2085 2085 for n in path.replace(b'\\', b'/').split(b'/'):
2086 2086 if not n:
2087 2087 continue
2088 2088 for c in _filenamebytestr(n):
2089 2089 if c in _winreservedchars:
2090 2090 return (
2091 2091 _(
2092 2092 b"filename contains '%s', which is reserved "
2093 2093 b"on Windows"
2094 2094 )
2095 2095 % c
2096 2096 )
2097 2097 if ord(c) <= 31:
2098 2098 return _(
2099 2099 b"filename contains '%s', which is invalid on Windows"
2100 2100 ) % stringutil.escapestr(c)
2101 2101 base = n.split(b'.')[0]
2102 2102 if base and base.lower() in _winreservednames:
2103 2103 return (
2104 2104 _(b"filename contains '%s', which is reserved on Windows")
2105 2105 % base
2106 2106 )
2107 2107 t = n[-1:]
2108 2108 if t in b'. ' and n not in b'..':
2109 2109 return (
2110 2110 _(
2111 2111 b"filename ends with '%s', which is not allowed "
2112 2112 b"on Windows"
2113 2113 )
2114 2114 % t
2115 2115 )
2116 2116
2117 2117
2118 2118 timer = getattr(time, "perf_counter", None)
2119 2119
2120 2120 if pycompat.iswindows:
2121 2121 checkosfilename = checkwinfilename
2122 2122 if not timer:
2123 2123 timer = time.clock # pytype: disable=module-attr
2124 2124 else:
2125 2125 # mercurial.windows doesn't have platform.checkosfilename
2126 2126 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2127 2127 if not timer:
2128 2128 timer = time.time
2129 2129
2130 2130
2131 2131 def makelock(info, pathname):
2132 2132 """Create a lock file atomically if possible
2133 2133
2134 2134 This may leave a stale lock file if symlink isn't supported and signal
2135 2135 interrupt is enabled.
2136 2136 """
2137 2137 try:
2138 2138 return os.symlink(info, pathname)
2139 2139 except OSError as why:
2140 2140 if why.errno == errno.EEXIST:
2141 2141 raise
2142 2142 except AttributeError: # no symlink in os
2143 2143 pass
2144 2144
2145 2145 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2146 2146 ld = os.open(pathname, flags)
2147 2147 os.write(ld, info)
2148 2148 os.close(ld)
2149 2149
2150 2150
2151 2151 def readlock(pathname: bytes) -> bytes:
2152 2152 try:
2153 2153 return readlink(pathname)
2154 2154 except OSError as why:
2155 2155 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2156 2156 raise
2157 2157 except AttributeError: # no symlink in os
2158 2158 pass
2159 2159 with posixfile(pathname, b'rb') as fp:
2160 2160 return fp.read()
2161 2161
2162 2162
2163 2163 def fstat(fp):
2164 2164 '''stat file object that may not have fileno method.'''
2165 2165 try:
2166 2166 return os.fstat(fp.fileno())
2167 2167 except AttributeError:
2168 2168 return os.stat(fp.name)
2169 2169
2170 2170
2171 2171 # File system features
2172 2172
2173 2173
2174 2174 def fscasesensitive(path: bytes) -> bool:
2175 2175 """
2176 2176 Return true if the given path is on a case-sensitive filesystem
2177 2177
2178 2178 Requires a path (like /foo/.hg) ending with a foldable final
2179 2179 directory component.
2180 2180 """
2181 2181 s1 = os.lstat(path)
2182 2182 d, b = os.path.split(path)
2183 2183 b2 = b.upper()
2184 2184 if b == b2:
2185 2185 b2 = b.lower()
2186 2186 if b == b2:
2187 2187 return True # no evidence against case sensitivity
2188 2188 p2 = os.path.join(d, b2)
2189 2189 try:
2190 2190 s2 = os.lstat(p2)
2191 2191 if s2 == s1:
2192 2192 return False
2193 2193 return True
2194 2194 except OSError:
2195 2195 return True
2196 2196
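# Sketch: the probe above re-stat()s the path with its final component
# case-flipped; identical stat results mean the filesystem folds case:
#
#     fscasesensitive(b'/repo/.hg')   # True on ext4; typically False on
#                                     # default APFS or NTFS volumes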
2197 2197
2198 2198 _re2_input = lambda x: x
2199 # google-re2 will need to be told to not output errors on its own
2200 _re2_options = None
2199 2201 try:
2200 2202 import re2 # pytype: disable=import-error
2201 2203
2202 2204 _re2 = None
2203 2205 except ImportError:
2204 2206 _re2 = False
2205 2207
2206 2208
2207 2209 def has_re2():
2208 2210 """return True is re2 is available, False otherwise"""
2209 2211 if _re2 is None:
2210 2212 _re._checkre2()
2211 2213 return _re2
2212 2214
2213 2215
2214 2216 class _re:
2215 2217 @staticmethod
2216 2218 def _checkre2():
2217 2219 global _re2
2218 2220 global _re2_input
2221 global _re2_options
2219 2222 if _re2 is not None:
2220 2223 # we already have the answer
2221 2224 return
2222 2225
2223 2226 check_pattern = br'\[([^\[]+)\]'
2224 2227 check_input = b'[ui]'
2225 2228 try:
2226 2229 # check if match works, see issue3964
2227 2230 _re2 = bool(re2.match(check_pattern, check_input))
2228 2231 except ImportError:
2229 2232 _re2 = False
2230 2233 except TypeError:
2231 2234 # the `pyre-2` project provides a re2 module that accepts bytes
2232 2235 # the `fb-re2` project provides a re2 module that accepts sysstr
2233 2236 check_pattern = pycompat.sysstr(check_pattern)
2234 2237 check_input = pycompat.sysstr(check_input)
2235 2238 _re2 = bool(re2.match(check_pattern, check_input))
2236 2239 _re2_input = pycompat.sysstr
2240 try:
2241 quiet = re2.Options()
2242 quiet.log_errors = False
2243 _re2_options = quiet
2244 except AttributeError:
2245 pass
2237 2246
2238 2247 def compile(self, pat, flags=0):
2239 2248 """Compile a regular expression, using re2 if possible
2240 2249
2241 2250 For best performance, use only re2-compatible regexp features. The
2242 2251 only flags from the re module that are re2-compatible are
2243 2252 IGNORECASE and MULTILINE."""
2244 2253 if _re2 is None:
2245 2254 self._checkre2()
2246 2255 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2247 2256 if flags & remod.IGNORECASE:
2248 2257 pat = b'(?i)' + pat
2249 2258 if flags & remod.MULTILINE:
2250 2259 pat = b'(?m)' + pat
2251 2260 try:
2252 return re2.compile(_re2_input(pat))
2261 input_regex = _re2_input(pat)
2262 if _re2_options is not None:
2263 compiled = re2.compile(input_regex, options=_re2_options)
2264 else:
2265 compiled = re2.compile(input_regex)
2266 return compiled
2253 2267 except re2.error:
2254 2268 pass
2255 2269 return remod.compile(pat, flags)
2256 2270
2257 2271 @propertycache
2258 2272 def escape(self):
2259 2273 """Return the version of escape corresponding to self.compile.
2260 2274
2261 2275 This is imperfect because whether re2 or re is used for a particular
2262 2276 function depends on the flags, etc, but it's the best we can do.
2263 2277 """
2264 2278 global _re2
2265 2279 if _re2 is None:
2266 2280 self._checkre2()
2267 2281 if _re2:
2268 2282 return re2.escape
2269 2283 else:
2270 2284 return remod.escape
2271 2285
2272 2286
2273 2287 re = _re()
2274 2288
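# Example (a sketch): the module-level `re` instance above prefers re2
# and silently falls back to the stdlib engine for unsupported flags or
# patterns:
#
#     pat = re.compile(br'[a-z]+', remod.IGNORECASE)
#     pat.match(b'Hg')
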
2275 2289 _fspathcache = {}
2276 2290
2277 2291
2278 2292 def fspath(name: bytes, root: bytes) -> bytes:
2279 2293 """Get name in the case stored in the filesystem
2280 2294
2281 2295 The name should be relative to root, and be normcase-ed for efficiency.
2282 2296
2283 2297 Note that this function is unnecessary, and should not be
2284 2298 called, for case-sensitive filesystems (simply because it's expensive).
2285 2299
2286 2300 The root should be normcase-ed, too.
2287 2301 """
2288 2302
2289 2303 def _makefspathcacheentry(dir):
2290 2304 return {normcase(n): n for n in os.listdir(dir)}
2291 2305
2292 2306 seps = pycompat.ossep
2293 2307 if pycompat.osaltsep:
2294 2308 seps = seps + pycompat.osaltsep
2295 2309 # Protect backslashes. This gets silly very quickly.
2296 2310 seps = seps.replace(b'\\', b'\\\\')
2297 2311 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2298 2312 dir = os.path.normpath(root)
2299 2313 result = []
2300 2314 for part, sep in pattern.findall(name):
2301 2315 if sep:
2302 2316 result.append(sep)
2303 2317 continue
2304 2318
2305 2319 if dir not in _fspathcache:
2306 2320 _fspathcache[dir] = _makefspathcacheentry(dir)
2307 2321 contents = _fspathcache[dir]
2308 2322
2309 2323 found = contents.get(part)
2310 2324 if not found:
2311 2325 # retry "once per directory" per "dirstate.walk" which
2312 2326 # may take place for each patch of "hg qpush", for example
2313 2327 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2314 2328 found = contents.get(part)
2315 2329
2316 2330 result.append(found or part)
2317 2331 dir = os.path.join(dir, part)
2318 2332
2319 2333 return b''.join(result)
2320 2334
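# Sketch (only meaningful on case-insensitive filesystems): given a file
# stored on disk as b'Foo/Bar.txt' under `root`,
#
#     fspath(b'foo/bar.txt', root)   # -> b'Foo/Bar.txt'
#
# where both arguments are assumed to be normcase-ed as documented above.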
2321 2335
2322 2336 def checknlink(testfile: bytes) -> bool:
2323 2337 '''check whether hardlink count reporting works properly'''
2324 2338
2325 2339 # testfile may be open, so we need a separate file for checking to
2326 2340 # work around issue2543 (or testfile may get lost on Samba shares)
2327 2341 f1, f2, fp = None, None, None
2328 2342 try:
2329 2343 fd, f1 = pycompat.mkstemp(
2330 2344 prefix=b'.%s-' % os.path.basename(testfile),
2331 2345 suffix=b'1~',
2332 2346 dir=os.path.dirname(testfile),
2333 2347 )
2334 2348 os.close(fd)
2335 2349 f2 = b'%s2~' % f1[:-2]
2336 2350
2337 2351 oslink(f1, f2)
2338 2352 # nlinks() may behave differently for files on Windows shares if
2339 2353 # the file is open.
2340 2354 fp = posixfile(f2)
2341 2355 return nlinks(f2) > 1
2342 2356 except OSError:
2343 2357 return False
2344 2358 finally:
2345 2359 if fp is not None:
2346 2360 fp.close()
2347 2361 for f in (f1, f2):
2348 2362 try:
2349 2363 if f is not None:
2350 2364 os.unlink(f)
2351 2365 except OSError:
2352 2366 pass
2353 2367
2354 2368
2355 2369 def endswithsep(path: bytes) -> bool:
2356 2370 '''Check path ends with os.sep or os.altsep.'''
2357 2371 return bool( # help pytype
2358 2372 path.endswith(pycompat.ossep)
2359 2373 or pycompat.osaltsep
2360 2374 and path.endswith(pycompat.osaltsep)
2361 2375 )
2362 2376
2363 2377
2364 2378 def splitpath(path: bytes) -> List[bytes]:
2365 2379 """Split path by os.sep.
2366 2380 Note that this function does not use os.altsep because this is
2367 2381 an alternative to a simple "xxx.split(os.sep)".
2368 2382 It is recommended to use os.path.normpath() before using this
2369 2383 function if needed."""
2370 2384 return path.split(pycompat.ossep)
2371 2385
2372 2386
2373 2387 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2374 2388 """Create a temporary file with the same contents from name
2375 2389
2376 2390 The permission bits are copied from the original file.
2377 2391
2378 2392 If the temporary file is going to be truncated immediately, you
2379 2393 can use emptyok=True as an optimization.
2380 2394
2381 2395 Returns the name of the temporary file.
2382 2396 """
2383 2397 d, fn = os.path.split(name)
2384 2398 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2385 2399 os.close(fd)
2386 2400 # Temporary files are created with mode 0600, which is usually not
2387 2401 # what we want. If the original file already exists, just copy
2388 2402 # its mode. Otherwise, manually obey umask.
2389 2403 copymode(name, temp, createmode, enforcewritable)
2390 2404
2391 2405 if emptyok:
2392 2406 return temp
2393 2407 try:
2394 2408 try:
2395 2409 ifp = posixfile(name, b"rb")
2396 2410 except IOError as inst:
2397 2411 if inst.errno == errno.ENOENT:
2398 2412 return temp
2399 2413 if not getattr(inst, 'filename', None):
2400 2414 inst.filename = name
2401 2415 raise
2402 2416 ofp = posixfile(temp, b"wb")
2403 2417 for chunk in filechunkiter(ifp):
2404 2418 ofp.write(chunk)
2405 2419 ifp.close()
2406 2420 ofp.close()
2407 2421 except: # re-raises
2408 2422 try:
2409 2423 os.unlink(temp)
2410 2424 except OSError:
2411 2425 pass
2412 2426 raise
2413 2427 return temp
2414 2428
2415 2429
2416 2430 class filestat:
2417 2431 """help to exactly detect change of a file
2418 2432
2419 2433 The 'stat' attribute is the result of 'os.stat()' if the specified
2420 2434 'path' exists. Otherwise, it is None. This avoids a preparatory
2421 2435 'exists()' check on the client side of this class.
2422 2436 """
2423 2437
2424 2438 def __init__(self, stat):
2425 2439 self.stat = stat
2426 2440
2427 2441 @classmethod
2428 2442 def frompath(cls, path):
2429 2443 try:
2430 2444 stat = os.stat(path)
2431 2445 except FileNotFoundError:
2432 2446 stat = None
2433 2447 return cls(stat)
2434 2448
2435 2449 @classmethod
2436 2450 def fromfp(cls, fp):
2437 2451 stat = os.fstat(fp.fileno())
2438 2452 return cls(stat)
2439 2453
2440 2454 __hash__ = object.__hash__
2441 2455
2442 2456 def __eq__(self, old):
2443 2457 try:
2444 2458 # if ambiguity between stat of new and old file is
2445 2459 # avoided, comparison of size, ctime and mtime is enough
2446 2460 # to exactly detect change of a file regardless of platform
2447 2461 return (
2448 2462 self.stat.st_size == old.stat.st_size
2449 2463 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2450 2464 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2451 2465 )
2452 2466 except AttributeError:
2453 2467 pass
2454 2468 try:
2455 2469 return self.stat is None and old.stat is None
2456 2470 except AttributeError:
2457 2471 return False
2458 2472
2459 2473 def isambig(self, old):
2460 2474 """Examine whether new (= self) stat is ambiguous against old one
2461 2475
2462 2476 "S[N]" below means stat of a file at N-th change:
2463 2477
2464 2478 - S[n-1].ctime < S[n].ctime: can detect change of a file
2465 2479 - S[n-1].ctime == S[n].ctime
2466 2480 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2467 2481 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2468 2482 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2469 2483 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2470 2484
2471 2485 Case (*2) above means that a file was changed twice or more
2472 2486 within the same second (= S[n-1].ctime), and comparison of
2473 2487 timestamps is ambiguous.
2474 2488 
2475 2489 The basic idea to avoid such ambiguity is "advance mtime by 1
2476 2490 sec, if the timestamp is ambiguous".
2477 2491 
2478 2492 But advancing mtime only in case (*2) doesn't work as
2479 2493 expected, because a naturally advanced S[n].mtime in case (*1)
2480 2494 might be equal to a manually advanced S[n-1 or earlier].mtime.
2481 2495 
2482 2496 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2483 2497 treated as ambiguous regardless of mtime, to avoid overlooking
2484 2498 changes hidden by collisions between such mtimes.
2485 2499
2486 2500 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2487 2501 S[n].mtime", even if size of a file isn't changed.
2488 2502 """
2489 2503 try:
2490 2504 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2491 2505 except AttributeError:
2492 2506 return False
2493 2507
2494 2508 def avoidambig(self, path, old):
2495 2509 """Change file stat of specified path to avoid ambiguity
2496 2510
2497 2511 'old' should be previous filestat of 'path'.
2498 2512
2499 2513 This skips avoiding ambiguity if the process doesn't have
2500 2514 appropriate privileges for 'path', and returns False in that
2501 2515 case.
2502 2516
2503 2517 Otherwise, this returns True, as "ambiguity is avoided".
2504 2518 """
2505 2519 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2506 2520 try:
2507 2521 os.utime(path, (advanced, advanced))
2508 2522 except PermissionError:
2509 2523 # utime() on the file created by another user causes EPERM,
2510 2524 # if a process doesn't have appropriate privileges
2511 2525 return False
2512 2526 return True
2513 2527
2514 2528 def __ne__(self, other):
2515 2529 return not self == other
2516 2530
2517 2531
2518 2532 class atomictempfile:
2519 2533 """writable file object that atomically updates a file
2520 2534
2521 2535 All writes will go to a temporary copy of the original file. Call
2522 2536 close() when you are done writing, and atomictempfile will rename
2523 2537 the temporary copy to the original name, making the changes
2524 2538 visible. If the object is destroyed without being closed, all your
2525 2539 writes are discarded.
2526 2540
2527 2541 checkambig argument of constructor is used with filestat, and is
2528 2542 useful only if target file is guarded by any lock (e.g. repo.lock
2529 2543 or repo.wlock).
2530 2544 """
2531 2545
2532 2546 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2533 2547 self.__name = name # permanent name
2534 2548 self._tempname = mktempcopy(
2535 2549 name,
2536 2550 emptyok=(b'w' in mode),
2537 2551 createmode=createmode,
2538 2552 enforcewritable=(b'w' in mode),
2539 2553 )
2540 2554
2541 2555 self._fp = posixfile(self._tempname, mode)
2542 2556 self._checkambig = checkambig
2543 2557
2544 2558 # delegated methods
2545 2559 self.read = self._fp.read
2546 2560 self.write = self._fp.write
2547 2561 self.writelines = self._fp.writelines
2548 2562 self.seek = self._fp.seek
2549 2563 self.tell = self._fp.tell
2550 2564 self.fileno = self._fp.fileno
2551 2565
2552 2566 def close(self):
2553 2567 if not self._fp.closed:
2554 2568 self._fp.close()
2555 2569 filename = localpath(self.__name)
2556 2570 oldstat = self._checkambig and filestat.frompath(filename)
2557 2571 if oldstat and oldstat.stat:
2558 2572 rename(self._tempname, filename)
2559 2573 newstat = filestat.frompath(filename)
2560 2574 if newstat.isambig(oldstat):
2561 2575 # stat of changed file is ambiguous to original one
2562 2576 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2563 2577 os.utime(filename, (advanced, advanced))
2564 2578 else:
2565 2579 rename(self._tempname, filename)
2566 2580
2567 2581 def discard(self):
2568 2582 if not self._fp.closed:
2569 2583 try:
2570 2584 os.unlink(self._tempname)
2571 2585 except OSError:
2572 2586 pass
2573 2587 self._fp.close()
2574 2588
2575 2589 def __del__(self):
2576 2590 if hasattr(self, '_fp'): # constructor actually did something
2577 2591 self.discard()
2578 2592
2579 2593 def __enter__(self):
2580 2594 return self
2581 2595
2582 2596 def __exit__(self, exctype, excvalue, traceback):
2583 2597 if exctype is not None:
2584 2598 self.discard()
2585 2599 else:
2586 2600 self.close()
2587 2601
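# Typical use (sketch): all-or-nothing replacement of a file's contents:
#
#     with atomictempfile(b'somefile') as f:
#         f.write(b'new content\n')
#
# On clean exit the temporary copy is renamed over b'somefile'; if the
# block raises, discard() runs and the original file is left untouched.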
2588 2602
2589 2603 def tryrmdir(f):
2590 2604 try:
2591 2605 removedirs(f)
2592 2606 except OSError as e:
2593 2607 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2594 2608 raise
2595 2609
2596 2610
2597 2611 def unlinkpath(
2598 2612 f: bytes, ignoremissing: bool = False, rmdir: bool = True
2599 2613 ) -> None:
2600 2614 """unlink and remove the directory if it is empty"""
2601 2615 if ignoremissing:
2602 2616 tryunlink(f)
2603 2617 else:
2604 2618 unlink(f)
2605 2619 if rmdir:
2606 2620 # try removing directories that might now be empty
2607 2621 try:
2608 2622 removedirs(os.path.dirname(f))
2609 2623 except OSError:
2610 2624 pass
2611 2625
2612 2626
2613 2627 def tryunlink(f: bytes) -> bool:
2614 2628 """Attempt to remove a file, ignoring FileNotFoundError.
2615 2629
2616 2630 Returns False in case the file did not exist, True otherwise
2617 2631 """
2618 2632 try:
2619 2633 unlink(f)
2620 2634 return True
2621 2635 except FileNotFoundError:
2622 2636 return False
2623 2637
2624 2638
2625 2639 def makedirs(
2626 2640 name: bytes, mode: Optional[int] = None, notindexed: bool = False
2627 2641 ) -> None:
2628 2642 """recursive directory creation with parent mode inheritance
2629 2643
2630 2644 Newly created directories are marked as "not to be indexed by
2631 2645 the content indexing service", if ``notindexed`` is specified
2632 2646 for "write" mode access.
2633 2647 """
2634 2648 try:
2635 2649 makedir(name, notindexed)
2636 2650 except OSError as err:
2637 2651 if err.errno == errno.EEXIST:
2638 2652 return
2639 2653 if err.errno != errno.ENOENT or not name:
2640 2654 raise
2641 2655 parent = os.path.dirname(abspath(name))
2642 2656 if parent == name:
2643 2657 raise
2644 2658 makedirs(parent, mode, notindexed)
2645 2659 try:
2646 2660 makedir(name, notindexed)
2647 2661 except OSError as err:
2648 2662 # Catch EEXIST to handle races
2649 2663 if err.errno == errno.EEXIST:
2650 2664 return
2651 2665 raise
2652 2666 if mode is not None:
2653 2667 os.chmod(name, mode)
2654 2668
2655 2669
2656 2670 def readfile(path: bytes) -> bytes:
2657 2671 with open(path, b'rb') as fp:
2658 2672 return fp.read()
2659 2673
2660 2674
2661 2675 def writefile(path: bytes, text: bytes) -> None:
2662 2676 with open(path, b'wb') as fp:
2663 2677 fp.write(text)
2664 2678
2665 2679
2666 2680 def appendfile(path: bytes, text: bytes) -> None:
2667 2681 with open(path, b'ab') as fp:
2668 2682 fp.write(text)
2669 2683
2670 2684
2671 2685 class chunkbuffer:
2672 2686 """Allow arbitrary sized chunks of data to be efficiently read from an
2673 2687 iterator over chunks of arbitrary size."""
2674 2688
2675 2689 def __init__(self, in_iter):
2676 2690 """in_iter is the iterator that's iterating over the input chunks."""
2677 2691
2678 2692 def splitbig(chunks):
2679 2693 for chunk in chunks:
2680 2694 if len(chunk) > 2 ** 20:
2681 2695 pos = 0
2682 2696 while pos < len(chunk):
2683 2697 end = pos + 2 ** 18
2684 2698 yield chunk[pos:end]
2685 2699 pos = end
2686 2700 else:
2687 2701 yield chunk
2688 2702
2689 2703 self.iter = splitbig(in_iter)
2690 2704 self._queue = collections.deque()
2691 2705 self._chunkoffset = 0
2692 2706
2693 2707 def read(self, l=None):
2694 2708 """Read L bytes of data from the iterator of chunks of data.
2695 2709 Returns less than L bytes if the iterator runs dry.
2696 2710
2697 2711 If size parameter is omitted, read everything"""
2698 2712 if l is None:
2699 2713 return b''.join(self.iter)
2700 2714
2701 2715 left = l
2702 2716 buf = []
2703 2717 queue = self._queue
2704 2718 while left > 0:
2705 2719 # refill the queue
2706 2720 if not queue:
2707 2721 target = 2 ** 18
2708 2722 for chunk in self.iter:
2709 2723 queue.append(chunk)
2710 2724 target -= len(chunk)
2711 2725 if target <= 0:
2712 2726 break
2713 2727 if not queue:
2714 2728 break
2715 2729
2716 2730 # The easy way to do this would be to queue.popleft(), modify the
2717 2731 # chunk (if necessary), then queue.appendleft(). However, for cases
2718 2732 # where we read partial chunk content, this incurs 2 dequeue
2719 2733 # mutations and creates a new str for the remaining chunk in the
2720 2734 # queue. Our code below avoids this overhead.
2721 2735
2722 2736 chunk = queue[0]
2723 2737 chunkl = len(chunk)
2724 2738 offset = self._chunkoffset
2725 2739
2726 2740 # Use full chunk.
2727 2741 if offset == 0 and left >= chunkl:
2728 2742 left -= chunkl
2729 2743 queue.popleft()
2730 2744 buf.append(chunk)
2731 2745 # self._chunkoffset remains at 0.
2732 2746 continue
2733 2747
2734 2748 chunkremaining = chunkl - offset
2735 2749
2736 2750 # Use all of unconsumed part of chunk.
2737 2751 if left >= chunkremaining:
2738 2752 left -= chunkremaining
2739 2753 queue.popleft()
2740 2754 # the offset == 0 case is handled by the block above, so this
2741 2755 # won't merely copy via ``chunk[0:]``.
2742 2756 buf.append(chunk[offset:])
2743 2757 self._chunkoffset = 0
2744 2758
2745 2759 # Partial chunk needed.
2746 2760 else:
2747 2761 buf.append(chunk[offset : offset + left])
2748 2762 self._chunkoffset += left
2749 2763 left -= chunkremaining
2750 2764
2751 2765 return b''.join(buf)
2752 2766
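# Sketch: re-chunk an iterator of arbitrarily sized chunks into exact
# reads (values traced through the code above):
#
#     cb = chunkbuffer(iter([b'ab', b'cdef', b'g']))
#     cb.read(3)   # -> b'abc' (second chunk consumed partially)
#     cb.read(4)   # -> b'defg'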
2753 2767
2754 2768 def filechunkiter(f, size=131072, limit=None):
2755 2769 """Create a generator that produces the data in the file size
2756 2770 (default 131072) bytes at a time, up to optional limit (default is
2757 2771 to read all data). Chunks may be less than size bytes if the
2758 2772 chunk is the last chunk in the file, or the file is a socket or
2759 2773 some other type of file that sometimes reads less data than is
2760 2774 requested."""
2761 2775 assert size >= 0
2762 2776 assert limit is None or limit >= 0
2763 2777 while True:
2764 2778 if limit is None:
2765 2779 nbytes = size
2766 2780 else:
2767 2781 nbytes = min(limit, size)
2768 2782 s = nbytes and f.read(nbytes)
2769 2783 if not s:
2770 2784 break
2771 2785 if limit:
2772 2786 limit -= len(s)
2773 2787 yield s
2774 2788
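# Usage sketch (file name and consumer are illustrative):
#
#     with posixfile(b'big.bin', b'rb') as fh:
#         for chunk in filechunkiter(fh, size=1 << 16, limit=1 << 20):
#             hasher.update(chunk)   # hypothetical incremental consumer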
2775 2789
2776 2790 class cappedreader:
2777 2791 """A file object proxy that allows reading up to N bytes.
2778 2792
2779 2793 Given a source file object, instances of this type allow reading up to
2780 2794 N bytes from that source file object. Attempts to read past the allowed
2781 2795 limit are treated as EOF.
2782 2796
2783 2797 It is assumed that I/O is not performed on the original file object
2784 2798 in addition to I/O that is performed by this instance. If there is,
2785 2799 state tracking will get out of sync and unexpected results will ensue.
2786 2800 """
2787 2801
2788 2802 def __init__(self, fh, limit):
2789 2803 """Allow reading up to <limit> bytes from <fh>."""
2790 2804 self._fh = fh
2791 2805 self._left = limit
2792 2806
2793 2807 def read(self, n=-1):
2794 2808 if not self._left:
2795 2809 return b''
2796 2810
2797 2811 if n < 0:
2798 2812 n = self._left
2799 2813
2800 2814 data = self._fh.read(min(n, self._left))
2801 2815 self._left -= len(data)
2802 2816 assert self._left >= 0
2803 2817
2804 2818 return data
2805 2819
2806 2820 def readinto(self, b):
2807 2821 res = self.read(len(b))
2808 2822 if res is None:
2809 2823 return None
2810 2824
2811 2825 b[0 : len(res)] = res
2812 2826 return len(res)
2813 2827
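# Sketch: expose only the first 16 bytes of a stream, then report EOF:
#
#     capped = cappedreader(io.BytesIO(b'x' * 64), 16)
#     len(capped.read())   # -> 16
#     capped.read()        # -> b'' (limit reached, treated as EOF)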
2814 2828
2815 2829 def unitcountfn(*unittable):
2816 2830 '''return a function that renders a readable count of some quantity'''
2817 2831
2818 2832 def go(count):
2819 2833 for multiplier, divisor, format in unittable:
2820 2834 if abs(count) >= divisor * multiplier:
2821 2835 return format % (count / float(divisor))
2822 2836 return unittable[-1][2] % count
2823 2837
2824 2838 return go
2825 2839
2826 2840
2827 2841 def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
2828 2842 """Check that linerange <fromline>:<toline> makes sense and return a
2829 2843 0-based range.
2830 2844
2831 2845 >>> processlinerange(10, 20)
2832 2846 (9, 20)
2833 2847 >>> processlinerange(2, 1)
2834 2848 Traceback (most recent call last):
2835 2849 ...
2836 2850 ParseError: line range must be positive
2837 2851 >>> processlinerange(0, 5)
2838 2852 Traceback (most recent call last):
2839 2853 ...
2840 2854 ParseError: fromline must be strictly positive
2841 2855 """
2842 2856 if toline - fromline < 0:
2843 2857 raise error.ParseError(_(b"line range must be positive"))
2844 2858 if fromline < 1:
2845 2859 raise error.ParseError(_(b"fromline must be strictly positive"))
2846 2860 return fromline - 1, toline
2847 2861
2848 2862
2849 2863 bytecount = unitcountfn(
2850 2864 (100, 1 << 30, _(b'%.0f GB')),
2851 2865 (10, 1 << 30, _(b'%.1f GB')),
2852 2866 (1, 1 << 30, _(b'%.2f GB')),
2853 2867 (100, 1 << 20, _(b'%.0f MB')),
2854 2868 (10, 1 << 20, _(b'%.1f MB')),
2855 2869 (1, 1 << 20, _(b'%.2f MB')),
2856 2870 (100, 1 << 10, _(b'%.0f KB')),
2857 2871 (10, 1 << 10, _(b'%.1f KB')),
2858 2872 (1, 1 << 10, _(b'%.2f KB')),
2859 2873 (1, 1, _(b'%.0f bytes')),
2860 2874 )
2861 2875
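# Examples (values follow from the unit table above):
#
#     bytecount(1 << 20)      # -> b'1.00 MB'
#     bytecount(123456789)    # -> b'118 MB'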
2862 2876
2863 2877 class transformingwriter:
2864 2878 """Writable file wrapper to transform data by function"""
2865 2879
2866 2880 def __init__(self, fp, encode):
2867 2881 self._fp = fp
2868 2882 self._encode = encode
2869 2883
2870 2884 def close(self):
2871 2885 self._fp.close()
2872 2886
2873 2887 def flush(self):
2874 2888 self._fp.flush()
2875 2889
2876 2890 def write(self, data):
2877 2891 return self._fp.write(self._encode(data))
2878 2892
2879 2893
2880 2894 # Matches a single EOL which can either be a CRLF where repeated CR
2881 2895 # are removed or a LF. We do not care about old Macintosh files, so a
2882 2896 # stray CR is an error.
2883 2897 _eolre = remod.compile(br'\r*\n')
2884 2898
2885 2899
2886 2900 def tolf(s: bytes) -> bytes:
2887 2901 return _eolre.sub(b'\n', s)
2888 2902
2889 2903
2890 2904 def tocrlf(s: bytes) -> bytes:
2891 2905 return _eolre.sub(b'\r\n', s)
2892 2906
2893 2907
2894 2908 def _crlfwriter(fp):
2895 2909 return transformingwriter(fp, tocrlf)
2896 2910
2897 2911
2898 2912 if pycompat.oslinesep == b'\r\n':
2899 2913 tonativeeol = tocrlf
2900 2914 fromnativeeol = tolf
2901 2915 nativeeolwriter = _crlfwriter
2902 2916 else:
2903 2917 tonativeeol = pycompat.identity
2904 2918 fromnativeeol = pycompat.identity
2905 2919 nativeeolwriter = pycompat.identity
2906 2920
2907 2921
2908 2922 # TODO delete since workaround variant for Python 2 no longer needed.
2909 2923 def iterfile(fp):
2910 2924 return fp
2911 2925
2912 2926
2913 2927 def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
2914 2928 for chunk in iterator:
2915 2929 for line in chunk.splitlines():
2916 2930 yield line
2917 2931
2918 2932
2919 2933 def expandpath(path: bytes) -> bytes:
2920 2934 return os.path.expanduser(os.path.expandvars(path))
2921 2935
2922 2936
2923 2937 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2924 2938 """Return the result of interpolating items in the mapping into string s.
2925 2939
2926 2940 prefix is a single character string, or a two character string with
2927 2941 a backslash as the first character if the prefix needs to be escaped in
2928 2942 a regular expression.
2929 2943
2930 2944 fn is an optional function that will be applied to the replacement text
2931 2945 just before replacement.
2932 2946
2933 2947 escape_prefix is an optional flag that allows using a doubled prefix
2934 2948 to escape the prefix character itself.
2935 2949 """
2936 2950 fn = fn or (lambda s: s)
2937 2951 patterns = b'|'.join(mapping.keys())
2938 2952 if escape_prefix:
2939 2953 patterns += b'|' + prefix
2940 2954 if len(prefix) > 1:
2941 2955 prefix_char = prefix[1:]
2942 2956 else:
2943 2957 prefix_char = prefix
2944 2958 mapping[prefix_char] = prefix_char
2945 2959 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2946 2960 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2947 2961
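# Worked example (sketch; this prefix needs no regex escaping):
#
#     interpolate(b'%', {b'user': b'alice'}, b'hi %user')
#     # -> b'hi alice'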
2948 2962
2949 2963 timecount = unitcountfn(
2950 2964 (1, 1e3, _(b'%.0f s')),
2951 2965 (100, 1, _(b'%.1f s')),
2952 2966 (10, 1, _(b'%.2f s')),
2953 2967 (1, 1, _(b'%.3f s')),
2954 2968 (100, 0.001, _(b'%.1f ms')),
2955 2969 (10, 0.001, _(b'%.2f ms')),
2956 2970 (1, 0.001, _(b'%.3f ms')),
2957 2971 (100, 0.000001, _(b'%.1f us')),
2958 2972 (10, 0.000001, _(b'%.2f us')),
2959 2973 (1, 0.000001, _(b'%.3f us')),
2960 2974 (100, 0.000000001, _(b'%.1f ns')),
2961 2975 (10, 0.000000001, _(b'%.2f ns')),
2962 2976 (1, 0.000000001, _(b'%.3f ns')),
2963 2977 )
2964 2978
2965 2979
2966 2980 @attr.s
2967 2981 class timedcmstats:
2968 2982 """Stats information produced by the timedcm context manager on entering."""
2969 2983
2970 2984 # the starting value of the timer as a float (meaning and resolution is
2971 2985 # platform dependent, see util.timer)
2972 2986 start = attr.ib(default=attr.Factory(lambda: timer()))
2973 2987 # the number of seconds as a floating point value; starts at 0, updated when
2974 2988 # the context is exited.
2975 2989 elapsed = attr.ib(default=0)
2976 2990 # the number of nested timedcm context managers.
2977 2991 level = attr.ib(default=1)
2978 2992
2979 2993 def __bytes__(self):
2980 2994 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2981 2995
2982 2996 __str__ = encoding.strmethod(__bytes__)
2983 2997
2984 2998
2985 2999 @contextlib.contextmanager
2986 3000 def timedcm(whencefmt, *whenceargs):
2987 3001 """A context manager that produces timing information for a given context.
2988 3002
2989 3003 On entering, a timedcmstats instance is produced.
2990 3004
2991 3005 This context manager is reentrant.
2992 3006
2993 3007 """
2994 3008 # track nested context managers
2995 3009 timedcm._nested += 1
2996 3010 timing_stats = timedcmstats(level=timedcm._nested)
2997 3011 try:
2998 3012 with tracing.log(whencefmt, *whenceargs):
2999 3013 yield timing_stats
3000 3014 finally:
3001 3015 timing_stats.elapsed = timer() - timing_stats.start
3002 3016 timedcm._nested -= 1
3003 3017
3004 3018
3005 3019 timedcm._nested = 0
3006 3020
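# Usage sketch (the work inside the block is illustrative):
#
#     with timedcm(b'loading %s', b'manifest') as stats:
#         load_manifest()              # hypothetical expensive call
#     # bytes(stats) -> e.g. b'12.3 ms'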
3007 3021
3008 3022 def timed(func):
3009 3023 """Report the execution time of a function call to stderr.
3010 3024
3011 3025 During development, use as a decorator when you need to measure
3012 3026 the cost of a function, e.g. as follows:
3013 3027
3014 3028 @util.timed
3015 3029 def foo(a, b, c):
3016 3030 pass
3017 3031 """
3018 3032
3019 3033 def wrapper(*args, **kwargs):
3020 3034 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3021 3035 result = func(*args, **kwargs)
3022 3036 stderr = procutil.stderr
3023 3037 stderr.write(
3024 3038 b'%s%s: %s\n'
3025 3039 % (
3026 3040 b' ' * time_stats.level * 2,
3027 3041 pycompat.bytestr(func.__name__),
3028 3042 time_stats,
3029 3043 )
3030 3044 )
3031 3045 return result
3032 3046
3033 3047 return wrapper
3034 3048
3035 3049
3036 3050 _sizeunits = (
3037 3051 (b'm', 2 ** 20),
3038 3052 (b'k', 2 ** 10),
3039 3053 (b'g', 2 ** 30),
3040 3054 (b'kb', 2 ** 10),
3041 3055 (b'mb', 2 ** 20),
3042 3056 (b'gb', 2 ** 30),
3043 3057 (b'b', 1),
3044 3058 )
3045 3059
3046 3060
3047 3061 def sizetoint(s: bytes) -> int:
3048 3062 """Convert a space specifier to a byte count.
3049 3063
3050 3064 >>> sizetoint(b'30')
3051 3065 30
3052 3066 >>> sizetoint(b'2.2kb')
3053 3067 2252
3054 3068 >>> sizetoint(b'6M')
3055 3069 6291456
3056 3070 """
3057 3071 t = s.strip().lower()
3058 3072 try:
3059 3073 for k, u in _sizeunits:
3060 3074 if t.endswith(k):
3061 3075 return int(float(t[: -len(k)]) * u)
3062 3076 return int(t)
3063 3077 except ValueError:
3064 3078 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3065 3079
3066 3080
3067 3081 class hooks:
3068 3082 """A collection of hook functions that can be used to extend a
3069 3083 function's behavior. Hooks are called in lexicographic order,
3070 3084 based on the names of their sources."""
3071 3085
3072 3086 def __init__(self):
3073 3087 self._hooks = []
3074 3088
3075 3089 def add(self, source, hook):
3076 3090 self._hooks.append((source, hook))
3077 3091
3078 3092 def __call__(self, *args):
3079 3093 self._hooks.sort(key=lambda x: x[0])
3080 3094 results = []
3081 3095 for source, hook in self._hooks:
3082 3096 results.append(hook(*args))
3083 3097 return results
3084 3098
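# Sketch: hooks run in lexicographic order of their source names:
#
#     h = hooks()
#     h.add(b'b-source', lambda x: x * 2)
#     h.add(b'a-source', lambda x: x + 1)
#     h(3)   # -> [4, 6]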
3085 3099
3086 3100 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3087 3101 """Yields lines for a nicely formatted stacktrace.
3088 3102 Skips the 'skip' last entries, then returns the last 'depth' entries.
3089 3103 Each file+linenumber is formatted according to fileline.
3090 3104 Each line is formatted according to line.
3091 3105 If line is None, it yields:
3092 3106 length of longest filepath+line number,
3093 3107 filepath+linenumber,
3094 3108 function
3095 3109
3096 3110 Not to be used in production code, but very convenient while developing.
3097 3111 """
3098 3112 entries = [
3099 3113 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3100 3114 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3101 3115 ][-depth:]
3102 3116 if entries:
3103 3117 fnmax = max(len(entry[0]) for entry in entries)
3104 3118 for fnln, func in entries:
3105 3119 if line is None:
3106 3120 yield (fnmax, fnln, func)
3107 3121 else:
3108 3122 yield line % (fnmax, fnln, func)
3109 3123
3110 3124
3111 3125 def debugstacktrace(
3112 3126 msg=b'stacktrace',
3113 3127 skip=0,
3114 3128 f=procutil.stderr,
3115 3129 otherf=procutil.stdout,
3116 3130 depth=0,
3117 3131 prefix=b'',
3118 3132 ):
3119 3133 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3120 3134 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3121 3135 By default it will flush stdout first.
3122 3136 It can be used everywhere and intentionally does not require a ui object.
3123 3137 Not to be used in production code, but very convenient while developing.
3124 3138 """
3125 3139 if otherf:
3126 3140 otherf.flush()
3127 3141 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3128 3142 for line in getstackframes(skip + 1, depth=depth):
3129 3143 f.write(prefix + line)
3130 3144 f.flush()
3131 3145
3132 3146
3133 3147 # convenient shortcut
3134 3148 dst = debugstacktrace
3135 3149
3136 3150
3137 3151 def safename(f, tag, ctx, others=None):
3138 3152 """
3139 3153 Generate a name that is safe to rename f to in the given context.
3140 3154
3141 3155 f: filename to rename
3142 3156 tag: a string tag that will be included in the new name
3143 3157 ctx: a context, in which the new name must not exist
3144 3158 others: a set of other filenames that the new name must not be in
3145 3159
3146 3160 Returns a file name of the form oldname~tag[~number] which does not exist
3147 3161 in the provided context and is not in the set of other names.
3148 3162 """
3149 3163 if others is None:
3150 3164 others = set()
3151 3165
3152 3166 fn = b'%s~%s' % (f, tag)
3153 3167 if fn not in ctx and fn not in others:
3154 3168 return fn
3155 3169 for n in itertools.count(1):
3156 3170 fn = b'%s~%s~%s' % (f, tag, n)
3157 3171 if fn not in ctx and fn not in others:
3158 3172 return fn
3159 3173
3160 3174
3161 3175 def readexactly(stream, n):
3162 3176 '''read n bytes from stream.read and abort if less was available'''
3163 3177 s = stream.read(n)
3164 3178 if len(s) < n:
3165 3179 raise error.Abort(
3166 3180 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3167 3181 % (len(s), n)
3168 3182 )
3169 3183 return s
3170 3184
3171 3185
3172 3186 def uvarintencode(value):
3173 3187 """Encode an unsigned integer value to a varint.
3174 3188
3175 3189 A varint is a variable length integer of 1 or more bytes. Each byte
3176 3190 except the last has the most significant bit set. The lower 7 bits of
3177 3191 each byte store the 2's complement representation, least significant group
3178 3192 first.
3179 3193
3180 3194 >>> uvarintencode(0)
3181 3195 '\\x00'
3182 3196 >>> uvarintencode(1)
3183 3197 '\\x01'
3184 3198 >>> uvarintencode(127)
3185 3199 '\\x7f'
3186 3200 >>> uvarintencode(1337)
3187 3201 '\\xb9\\n'
3188 3202 >>> uvarintencode(65536)
3189 3203 '\\x80\\x80\\x04'
3190 3204 >>> uvarintencode(-1)
3191 3205 Traceback (most recent call last):
3192 3206 ...
3193 3207 ProgrammingError: negative value for uvarint: -1
3194 3208 """
3195 3209 if value < 0:
3196 3210 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3197 3211 bits = value & 0x7F
3198 3212 value >>= 7
3199 3213 bytes = []
3200 3214 while value:
3201 3215 bytes.append(pycompat.bytechr(0x80 | bits))
3202 3216 bits = value & 0x7F
3203 3217 value >>= 7
3204 3218 bytes.append(pycompat.bytechr(bits))
3205 3219
3206 3220 return b''.join(bytes)
3207 3221
3208 3222
3209 3223 def uvarintdecodestream(fh):
3210 3224 """Decode an unsigned variable length integer from a stream.
3211 3225
3212 3226 The passed argument is anything that has a ``.read(N)`` method.
3213 3227
3214 3228 >>> from io import BytesIO
3215 3229 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3216 3230 0
3217 3231 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3218 3232 1
3219 3233 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3220 3234 127
3221 3235 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3222 3236 1337
3223 3237 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3224 3238 65536
3225 3239 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3226 3240 Traceback (most recent call last):
3227 3241 ...
3228 3242 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3229 3243 """
3230 3244 result = 0
3231 3245 shift = 0
3232 3246 while True:
3233 3247 byte = ord(readexactly(fh, 1))
3234 3248 result |= (byte & 0x7F) << shift
3235 3249 if not (byte & 0x80):
3236 3250 return result
3237 3251 shift += 7
3238 3252
3239 3253
3240 3254 # Passing the '' locale means that the locale should be set according to the
3241 3255 # user settings (environment variables).
3242 3256 # Python sometimes avoids setting the global locale settings. When interfacing
3243 3257 # with C code (e.g. the curses module or the Subversion bindings), the global
3244 3258 # locale settings must be initialized correctly. Python 2 does not initialize
3245 3259 # the global locale settings on interpreter startup. Python 3 sometimes
3246 3260 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3247 3261 # explicitly initialize it to get consistent behavior if it's not already
3248 3262 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3249 3263 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3250 3264 # if we can remove this code.
3251 3265 @contextlib.contextmanager
3252 3266 def with_lc_ctype():
3253 3267 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3254 3268 if oldloc == 'C':
3255 3269 try:
3256 3270 try:
3257 3271 locale.setlocale(locale.LC_CTYPE, '')
3258 3272 except locale.Error:
3259 3273 # The likely case is that the locale from the environment
3260 3274 # variables is unknown.
3261 3275 pass
3262 3276 yield
3263 3277 finally:
3264 3278 locale.setlocale(locale.LC_CTYPE, oldloc)
3265 3279 else:
3266 3280 yield
3267 3281
3268 3282
3269 3283 def _estimatememory() -> Optional[int]:
3270 3284 """Provide an estimate for the available system memory in Bytes.
3271 3285
3272 3286 If no estimate can be provided on the platform, returns None.
3273 3287 """
3274 3288 if pycompat.sysplatform.startswith(b'win'):
3275 3289 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3276 3290 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3277 3291 from ctypes.wintypes import ( # pytype: disable=import-error
3278 3292 Structure,
3279 3293 byref,
3280 3294 sizeof,
3281 3295 windll,
3282 3296 )
3283 3297
3284 3298 class MEMORYSTATUSEX(Structure):
3285 3299 _fields_ = [
3286 3300 ('dwLength', DWORD),
3287 3301 ('dwMemoryLoad', DWORD),
3288 3302 ('ullTotalPhys', DWORDLONG),
3289 3303 ('ullAvailPhys', DWORDLONG),
3290 3304 ('ullTotalPageFile', DWORDLONG),
3291 3305 ('ullAvailPageFile', DWORDLONG),
3292 3306 ('ullTotalVirtual', DWORDLONG),
3293 3307 ('ullAvailVirtual', DWORDLONG),
3294 3308 ('ullExtendedVirtual', DWORDLONG),
3295 3309 ]
3296 3310
3297 3311 x = MEMORYSTATUSEX()
3298 3312 x.dwLength = sizeof(x)
3299 3313 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3300 3314 return x.ullAvailPhys
3301 3315
3302 3316 # On newer Unix-like systems and Mac OSX, the sysconf interface
3303 3317 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3304 3318 # seems to be implemented on most systems.
3305 3319 try:
3306 3320 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3307 3321 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3308 3322 return pagesize * pages
3309 3323 except OSError: # sysconf can fail
3310 3324 pass
3311 3325 except KeyError: # unknown parameter
3312 3326 pass