tests: fix sortdict doctest with Python 3.12...
Mads Kiilerich
r51646:a2df7485 stable
@@ -1,3327 +1,3327 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16
17 17 import abc
18 18 import collections
19 19 import contextlib
20 20 import errno
21 21 import gc
22 22 import hashlib
23 23 import io
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import pickle # provides util.pickle symbol
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .node import hex
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 45 from hgdemandimport import tracing
46 46 from . import (
47 47 encoding,
48 48 error,
49 49 i18n,
50 50 policy,
51 51 pycompat,
52 52 urllibcompat,
53 53 )
54 54 from .utils import (
55 55 compression,
56 56 hashutil,
57 57 procutil,
58 58 stringutil,
59 59 )
60 60
61 61 if pycompat.TYPE_CHECKING:
62 62 from typing import (
63 63 Iterable,
64 64 Iterator,
65 65 List,
66 66 Optional,
67 67 Tuple,
68 68 )
69 69
70 70
71 71 base85 = policy.importmod('base85')
72 72 osutil = policy.importmod('osutil')
73 73
74 74 b85decode = base85.b85decode
75 75 b85encode = base85.b85encode
76 76
77 77 cookielib = pycompat.cookielib
78 78 httplib = pycompat.httplib
79 79 safehasattr = pycompat.safehasattr
80 80 socketserver = pycompat.socketserver
81 81 bytesio = io.BytesIO
82 82 # TODO deprecate stringio name, as it is a lie on Python 3.
83 83 stringio = bytesio
84 84 xmlrpclib = pycompat.xmlrpclib
85 85
86 86 httpserver = urllibcompat.httpserver
87 87 urlerr = urllibcompat.urlerr
88 88 urlreq = urllibcompat.urlreq
89 89
90 90 # workaround for win32mbcs
91 91 _filenamebytestr = pycompat.bytestr
92 92
93 93 if pycompat.iswindows:
94 94 from . import windows as platform
95 95 else:
96 96 from . import posix as platform
97 97
98 98 _ = i18n._
99 99
100 100 abspath = platform.abspath
101 101 bindunixsocket = platform.bindunixsocket
102 102 cachestat = platform.cachestat
103 103 checkexec = platform.checkexec
104 104 checklink = platform.checklink
105 105 copymode = platform.copymode
106 106 expandglobs = platform.expandglobs
107 107 getfsmountpoint = platform.getfsmountpoint
108 108 getfstype = platform.getfstype
109 109 get_password = platform.get_password
110 110 groupmembers = platform.groupmembers
111 111 groupname = platform.groupname
112 112 isexec = platform.isexec
113 113 isowner = platform.isowner
114 114 listdir = osutil.listdir
115 115 localpath = platform.localpath
116 116 lookupreg = platform.lookupreg
117 117 makedir = platform.makedir
118 118 nlinks = platform.nlinks
119 119 normpath = platform.normpath
120 120 normcase = platform.normcase
121 121 normcasespec = platform.normcasespec
122 122 normcasefallback = platform.normcasefallback
123 123 openhardlinks = platform.openhardlinks
124 124 oslink = platform.oslink
125 125 parsepatchoutput = platform.parsepatchoutput
126 126 pconvert = platform.pconvert
127 127 poll = platform.poll
128 128 posixfile = platform.posixfile
129 129 readlink = platform.readlink
130 130 rename = platform.rename
131 131 removedirs = platform.removedirs
132 132 samedevice = platform.samedevice
133 133 samefile = platform.samefile
134 134 samestat = platform.samestat
135 135 setflags = platform.setflags
136 136 split = platform.split
137 137 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 138 statisexec = platform.statisexec
139 139 statislink = platform.statislink
140 140 umask = platform.umask
141 141 unlink = platform.unlink
142 142 username = platform.username
143 143
144 144
145 145 def setumask(val):
146 146 # type: (int) -> None
147 147 '''updates the umask. used by chg server'''
148 148 if pycompat.iswindows:
149 149 return
150 150 os.umask(val)
151 151 global umask
152 152 platform.umask = umask = val & 0o777
153 153
154 154
155 155 # small compat layer
156 156 compengines = compression.compengines
157 157 SERVERROLE = compression.SERVERROLE
158 158 CLIENTROLE = compression.CLIENTROLE
159 159
160 160 # Python compatibility
161 161
162 162 _notset = object()
163 163
164 164
165 165 def bitsfrom(container):
166 166 bits = 0
167 167 for bit in container:
168 168 bits |= bit
169 169 return bits
170 170
171 171
172 172 # python 2.6 still has deprecation warnings enabled by default. We do not want
173 173 # to display anything to standard users, so detect if we are running tests and
174 174 # only use python deprecation warnings in this case.
175 175 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
176 176 if _dowarn:
177 177 # explicitly unfilter our warning for python 2.7
178 178 #
179 179 # The option of setting PYTHONWARNINGS in the test runner was investigated.
180 180 # However, a module name set through PYTHONWARNINGS is matched exactly, so
181 181 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
182 182 # makes the whole PYTHONWARNINGS thing useless for our usecase.
183 183 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
184 184 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
185 185 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
186 186 if _dowarn:
187 187 # silence warning emitted by passing user string to re.sub()
188 188 warnings.filterwarnings(
189 189 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
190 190 )
191 191 warnings.filterwarnings(
192 192 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
193 193 )
194 194 # TODO: reinvent imp.is_frozen()
195 195 warnings.filterwarnings(
196 196 'ignore',
197 197 'the imp module is deprecated',
198 198 DeprecationWarning,
199 199 'mercurial',
200 200 )
201 201
202 202
203 203 def nouideprecwarn(msg, version, stacklevel=1):
204 204 """Issue an python native deprecation warning
205 205
206 206 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
207 207 """
208 208 if _dowarn:
209 209 msg += (
210 210 b"\n(compatibility will be dropped after Mercurial-%s,"
211 211 b" update your code.)"
212 212 ) % version
213 213 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
214 214 # on python 3 with chg, we will need to explicitly flush the output
215 215 sys.stderr.flush()
216 216
217 217
218 218 DIGESTS = {
219 219 b'md5': hashlib.md5,
220 220 b'sha1': hashutil.sha1,
221 221 b'sha512': hashlib.sha512,
222 222 }
223 223 # List of digest types from strongest to weakest
224 224 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
225 225
226 226 for k in DIGESTS_BY_STRENGTH:
227 227 assert k in DIGESTS
228 228
229 229
230 230 class digester:
231 231 """helper to compute digests.
232 232
233 233 This helper can be used to compute one or more digests given their name.
234 234
235 235 >>> d = digester([b'md5', b'sha1'])
236 236 >>> d.update(b'foo')
237 237 >>> [k for k in sorted(d)]
238 238 ['md5', 'sha1']
239 239 >>> d[b'md5']
240 240 'acbd18db4cc2f85cedef654fccc4a4d8'
241 241 >>> d[b'sha1']
242 242 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
243 243 >>> digester.preferred([b'md5', b'sha1'])
244 244 'sha1'
245 245 """
246 246
247 247 def __init__(self, digests, s=b''):
248 248 self._hashes = {}
249 249 for k in digests:
250 250 if k not in DIGESTS:
251 251 raise error.Abort(_(b'unknown digest type: %s') % k)
252 252 self._hashes[k] = DIGESTS[k]()
253 253 if s:
254 254 self.update(s)
255 255
256 256 def update(self, data):
257 257 for h in self._hashes.values():
258 258 h.update(data)
259 259
260 260 def __getitem__(self, key):
261 261 if key not in DIGESTS:
262 262 raise error.Abort(_(b'unknown digest type: %s') % key)
263 263 return hex(self._hashes[key].digest())
264 264
265 265 def __iter__(self):
266 266 return iter(self._hashes)
267 267
268 268 @staticmethod
269 269 def preferred(supported):
270 270 """returns the strongest digest type in both supported and DIGESTS."""
271 271
272 272 for k in DIGESTS_BY_STRENGTH:
273 273 if k in supported:
274 274 return k
275 275 return None
276 276
277 277
278 278 class digestchecker:
279 279 """file handle wrapper that additionally checks content against a given
280 280 size and digests.
281 281
282 282 d = digestchecker(fh, size, {'md5': '...'})
283 283
284 284 When multiple digests are given, all of them are validated.
285 285 """
286 286
287 287 def __init__(self, fh, size, digests):
288 288 self._fh = fh
289 289 self._size = size
290 290 self._got = 0
291 291 self._digests = dict(digests)
292 292 self._digester = digester(self._digests.keys())
293 293
294 294 def read(self, length=-1):
295 295 content = self._fh.read(length)
296 296 self._digester.update(content)
297 297 self._got += len(content)
298 298 return content
299 299
300 300 def validate(self):
301 301 if self._size != self._got:
302 302 raise error.Abort(
303 303 _(b'size mismatch: expected %d, got %d')
304 304 % (self._size, self._got)
305 305 )
306 306 for k, v in self._digests.items():
307 307 if v != self._digester[k]:
308 308 # i18n: first parameter is a digest name
309 309 raise error.Abort(
310 310 _(b'%s mismatch: expected %s, got %s')
311 311 % (k, v, self._digester[k])
312 312 )
313 313
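A minimal usage sketch (not part of util.py; the stream is hypothetical): feed data through digestchecker, then validate. The md5 value for b'foo' is the one shown in the digester doctest above.

    import io
    fh = digestchecker(io.BytesIO(b'foo'), 3,
                       {b'md5': b'acbd18db4cc2f85cedef654fccc4a4d8'})
    while fh.read(4096):
        pass              # drain the stream so size and digests are complete
    fh.validate()         # raises error.Abort on a size or digest mismatch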
314 314
315 315 try:
316 316 buffer = buffer # pytype: disable=name-error
317 317 except NameError:
318 318
319 319 def buffer(sliceable, offset=0, length=None):
320 320 if length is not None:
321 321 return memoryview(sliceable)[offset : offset + length]
322 322 return memoryview(sliceable)[offset:]
323 323
324 324
325 325 _chunksize = 4096
326 326
327 327
328 328 class bufferedinputpipe:
329 329 """a manually buffered input pipe
330 330
331 331 Python will not let us use buffered IO and lazy reading with 'polling' at
332 332 the same time. We cannot probe the buffer state and select will not detect
333 333 that data are ready to read if they are already buffered.
334 334
335 335 This class lets us work around that by implementing its own buffering
336 336 (allowing efficient readline) while offering a way to know if the buffer is
337 337 empty from the output (allowing collaboration of the buffer with polling).
338 338
339 339 This class lives in the 'util' module because it makes use of the 'os'
340 340 module from the python stdlib.
341 341 """
342 342
343 343 def __new__(cls, fh):
344 344 # If we receive a fileobjectproxy, we need to use a variation of this
345 345 # class that notifies observers about activity.
346 346 if isinstance(fh, fileobjectproxy):
347 347 cls = observedbufferedinputpipe
348 348
349 349 return super(bufferedinputpipe, cls).__new__(cls)
350 350
351 351 def __init__(self, input):
352 352 self._input = input
353 353 self._buffer = []
354 354 self._eof = False
355 355 self._lenbuf = 0
356 356
357 357 @property
358 358 def hasbuffer(self):
359 359 """True is any data is currently buffered
360 360
361 361 This will be used externally a pre-step for polling IO. If there is
362 362 already data then no polling should be set in place."""
363 363 return bool(self._buffer)
364 364
365 365 @property
366 366 def closed(self):
367 367 return self._input.closed
368 368
369 369 def fileno(self):
370 370 return self._input.fileno()
371 371
372 372 def close(self):
373 373 return self._input.close()
374 374
375 375 def read(self, size):
376 376 while (not self._eof) and (self._lenbuf < size):
377 377 self._fillbuffer()
378 378 return self._frombuffer(size)
379 379
380 380 def unbufferedread(self, size):
381 381 if not self._eof and self._lenbuf == 0:
382 382 self._fillbuffer(max(size, _chunksize))
383 383 return self._frombuffer(min(self._lenbuf, size))
384 384
385 385 def readline(self, *args, **kwargs):
386 386 if len(self._buffer) > 1:
387 387 # this should not happen because both read and readline end with a
388 388 # _frombuffer call that collapses it.
389 389 self._buffer = [b''.join(self._buffer)]
390 390 self._lenbuf = len(self._buffer[0])
391 391 lfi = -1
392 392 if self._buffer:
393 393 lfi = self._buffer[-1].find(b'\n')
394 394 while (not self._eof) and lfi < 0:
395 395 self._fillbuffer()
396 396 if self._buffer:
397 397 lfi = self._buffer[-1].find(b'\n')
398 398 size = lfi + 1
399 399 if lfi < 0: # end of file
400 400 size = self._lenbuf
401 401 elif len(self._buffer) > 1:
402 402 # we need to take previous chunks into account
403 403 size += self._lenbuf - len(self._buffer[-1])
404 404 return self._frombuffer(size)
405 405
406 406 def _frombuffer(self, size):
407 407 """return at most 'size' data from the buffer
408 408
409 409 The data are removed from the buffer."""
410 410 if size == 0 or not self._buffer:
411 411 return b''
412 412 buf = self._buffer[0]
413 413 if len(self._buffer) > 1:
414 414 buf = b''.join(self._buffer)
415 415
416 416 data = buf[:size]
417 417 buf = buf[len(data) :]
418 418 if buf:
419 419 self._buffer = [buf]
420 420 self._lenbuf = len(buf)
421 421 else:
422 422 self._buffer = []
423 423 self._lenbuf = 0
424 424 return data
425 425
426 426 def _fillbuffer(self, size=_chunksize):
427 427 """read data to the buffer"""
428 428 data = os.read(self._input.fileno(), size)
429 429 if not data:
430 430 self._eof = True
431 431 else:
432 432 self._lenbuf += len(data)
433 433 self._buffer.append(data)
434 434
435 435 return data
436 436
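A minimal sketch of the intended buffer/polling collaboration, assuming a hypothetical subprocess 'proc' with a stdout pipe:

    pipe = bufferedinputpipe(proc.stdout)
    if not pipe.hasbuffer:
        # only block in poll() when nothing is already buffered
        poll([pipe.fileno()])
    line = pipe.readline()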
437 437
438 438 def mmapread(fp, size=None):
439 439 if size == 0:
440 440 # size of 0 to mmap.mmap() means "all data"
441 441 # rather than "zero bytes", so special case that.
442 442 return b''
443 443 elif size is None:
444 444 size = 0
445 445 fd = getattr(fp, 'fileno', lambda: fp)()
446 446 try:
447 447 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
448 448 except ValueError:
449 449 # Empty files cannot be mmapped, but mmapread should still work. Check
450 450 # if the file is empty, and if so, return an empty buffer.
451 451 if os.fstat(fd).st_size == 0:
452 452 return b''
453 453 raise
454 454
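A minimal sketch of mmapread (the path is hypothetical):

    with posixfile(b'/tmp/data.bin', b'rb') as fp:
        buf = mmapread(fp)        # whole file mapped read-only
        head = bytes(buf[:16])    # mmap objects support slicing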
455 455
456 456 class fileobjectproxy:
457 457 """A proxy around file objects that tells a watcher when events occur.
458 458
459 459 This type is intended to only be used for testing purposes. Think hard
460 460 before using it in important code.
461 461 """
462 462
463 463 __slots__ = (
464 464 '_orig',
465 465 '_observer',
466 466 )
467 467
468 468 def __init__(self, fh, observer):
469 469 object.__setattr__(self, '_orig', fh)
470 470 object.__setattr__(self, '_observer', observer)
471 471
472 472 def __getattribute__(self, name):
473 473 ours = {
474 474 '_observer',
475 475 # IOBase
476 476 'close',
477 477 # closed is a property
478 478 'fileno',
479 479 'flush',
480 480 'isatty',
481 481 'readable',
482 482 'readline',
483 483 'readlines',
484 484 'seek',
485 485 'seekable',
486 486 'tell',
487 487 'truncate',
488 488 'writable',
489 489 'writelines',
490 490 # RawIOBase
491 491 'read',
492 492 'readall',
493 493 'readinto',
494 494 'write',
495 495 # BufferedIOBase
496 496 # raw is a property
497 497 'detach',
498 498 # read defined above
499 499 'read1',
500 500 # readinto defined above
501 501 # write defined above
502 502 }
503 503
504 504 # We only observe some methods.
505 505 if name in ours:
506 506 return object.__getattribute__(self, name)
507 507
508 508 return getattr(object.__getattribute__(self, '_orig'), name)
509 509
510 510 def __nonzero__(self):
511 511 return bool(object.__getattribute__(self, '_orig'))
512 512
513 513 __bool__ = __nonzero__
514 514
515 515 def __delattr__(self, name):
516 516 return delattr(object.__getattribute__(self, '_orig'), name)
517 517
518 518 def __setattr__(self, name, value):
519 519 return setattr(object.__getattribute__(self, '_orig'), name, value)
520 520
521 521 def __iter__(self):
522 522 return object.__getattribute__(self, '_orig').__iter__()
523 523
524 524 def _observedcall(self, name, *args, **kwargs):
525 525 # Call the original object.
526 526 orig = object.__getattribute__(self, '_orig')
527 527 res = getattr(orig, name)(*args, **kwargs)
528 528
529 529 # Call a method on the observer of the same name with arguments
530 530 # so it can react, log, etc.
531 531 observer = object.__getattribute__(self, '_observer')
532 532 fn = getattr(observer, name, None)
533 533 if fn:
534 534 fn(res, *args, **kwargs)
535 535
536 536 return res
537 537
538 538 def close(self, *args, **kwargs):
539 539 return object.__getattribute__(self, '_observedcall')(
540 540 'close', *args, **kwargs
541 541 )
542 542
543 543 def fileno(self, *args, **kwargs):
544 544 return object.__getattribute__(self, '_observedcall')(
545 545 'fileno', *args, **kwargs
546 546 )
547 547
548 548 def flush(self, *args, **kwargs):
549 549 return object.__getattribute__(self, '_observedcall')(
550 550 'flush', *args, **kwargs
551 551 )
552 552
553 553 def isatty(self, *args, **kwargs):
554 554 return object.__getattribute__(self, '_observedcall')(
555 555 'isatty', *args, **kwargs
556 556 )
557 557
558 558 def readable(self, *args, **kwargs):
559 559 return object.__getattribute__(self, '_observedcall')(
560 560 'readable', *args, **kwargs
561 561 )
562 562
563 563 def readline(self, *args, **kwargs):
564 564 return object.__getattribute__(self, '_observedcall')(
565 565 'readline', *args, **kwargs
566 566 )
567 567
568 568 def readlines(self, *args, **kwargs):
569 569 return object.__getattribute__(self, '_observedcall')(
570 570 'readlines', *args, **kwargs
571 571 )
572 572
573 573 def seek(self, *args, **kwargs):
574 574 return object.__getattribute__(self, '_observedcall')(
575 575 'seek', *args, **kwargs
576 576 )
577 577
578 578 def seekable(self, *args, **kwargs):
579 579 return object.__getattribute__(self, '_observedcall')(
580 580 'seekable', *args, **kwargs
581 581 )
582 582
583 583 def tell(self, *args, **kwargs):
584 584 return object.__getattribute__(self, '_observedcall')(
585 585 'tell', *args, **kwargs
586 586 )
587 587
588 588 def truncate(self, *args, **kwargs):
589 589 return object.__getattribute__(self, '_observedcall')(
590 590 'truncate', *args, **kwargs
591 591 )
592 592
593 593 def writable(self, *args, **kwargs):
594 594 return object.__getattribute__(self, '_observedcall')(
595 595 'writable', *args, **kwargs
596 596 )
597 597
598 598 def writelines(self, *args, **kwargs):
599 599 return object.__getattribute__(self, '_observedcall')(
600 600 'writelines', *args, **kwargs
601 601 )
602 602
603 603 def read(self, *args, **kwargs):
604 604 return object.__getattribute__(self, '_observedcall')(
605 605 'read', *args, **kwargs
606 606 )
607 607
608 608 def readall(self, *args, **kwargs):
609 609 return object.__getattribute__(self, '_observedcall')(
610 610 'readall', *args, **kwargs
611 611 )
612 612
613 613 def readinto(self, *args, **kwargs):
614 614 return object.__getattribute__(self, '_observedcall')(
615 615 'readinto', *args, **kwargs
616 616 )
617 617
618 618 def write(self, *args, **kwargs):
619 619 return object.__getattribute__(self, '_observedcall')(
620 620 'write', *args, **kwargs
621 621 )
622 622
623 623 def detach(self, *args, **kwargs):
624 624 return object.__getattribute__(self, '_observedcall')(
625 625 'detach', *args, **kwargs
626 626 )
627 627
628 628 def read1(self, *args, **kwargs):
629 629 return object.__getattribute__(self, '_observedcall')(
630 630 'read1', *args, **kwargs
631 631 )
632 632
633 633
634 634 class observedbufferedinputpipe(bufferedinputpipe):
635 635 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
636 636
637 637 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
638 638 bypass ``fileobjectproxy``. Because of this, we need to make
639 639 ``bufferedinputpipe`` aware of these operations.
640 640
641 641 This variation of ``bufferedinputpipe`` can notify observers about
642 642 ``os.read()`` events. It also re-publishes other events, such as
643 643 ``read()`` and ``readline()``.
644 644 """
645 645
646 646 def _fillbuffer(self, size=_chunksize):
647 647 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
648 648
649 649 fn = getattr(self._input._observer, 'osread', None)
650 650 if fn:
651 651 fn(res, size)
652 652
653 653 return res
654 654
655 655 # We use different observer methods because the operation isn't
656 656 # performed on the actual file object but on us.
657 657 def read(self, size):
658 658 res = super(observedbufferedinputpipe, self).read(size)
659 659
660 660 fn = getattr(self._input._observer, 'bufferedread', None)
661 661 if fn:
662 662 fn(res, size)
663 663
664 664 return res
665 665
666 666 def readline(self, *args, **kwargs):
667 667 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
668 668
669 669 fn = getattr(self._input._observer, 'bufferedreadline', None)
670 670 if fn:
671 671 fn(res)
672 672
673 673 return res
674 674
675 675
676 676 PROXIED_SOCKET_METHODS = {
677 677 'makefile',
678 678 'recv',
679 679 'recvfrom',
680 680 'recvfrom_into',
681 681 'recv_into',
682 682 'send',
683 683 'sendall',
684 684 'sendto',
685 685 'setblocking',
686 686 'settimeout',
687 687 'gettimeout',
688 688 'setsockopt',
689 689 }
690 690
691 691
692 692 class socketproxy:
693 693 """A proxy around a socket that tells a watcher when events occur.
694 694
695 695 This is like ``fileobjectproxy`` except for sockets.
696 696
697 697 This type is intended to only be used for testing purposes. Think hard
698 698 before using it in important code.
699 699 """
700 700
701 701 __slots__ = (
702 702 '_orig',
703 703 '_observer',
704 704 )
705 705
706 706 def __init__(self, sock, observer):
707 707 object.__setattr__(self, '_orig', sock)
708 708 object.__setattr__(self, '_observer', observer)
709 709
710 710 def __getattribute__(self, name):
711 711 if name in PROXIED_SOCKET_METHODS:
712 712 return object.__getattribute__(self, name)
713 713
714 714 return getattr(object.__getattribute__(self, '_orig'), name)
715 715
716 716 def __delattr__(self, name):
717 717 return delattr(object.__getattribute__(self, '_orig'), name)
718 718
719 719 def __setattr__(self, name, value):
720 720 return setattr(object.__getattribute__(self, '_orig'), name, value)
721 721
722 722 def __nonzero__(self):
723 723 return bool(object.__getattribute__(self, '_orig'))
724 724
725 725 __bool__ = __nonzero__
726 726
727 727 def _observedcall(self, name, *args, **kwargs):
728 728 # Call the original object.
729 729 orig = object.__getattribute__(self, '_orig')
730 730 res = getattr(orig, name)(*args, **kwargs)
731 731
732 732 # Call a method on the observer of the same name with arguments
733 733 # so it can react, log, etc.
734 734 observer = object.__getattribute__(self, '_observer')
735 735 fn = getattr(observer, name, None)
736 736 if fn:
737 737 fn(res, *args, **kwargs)
738 738
739 739 return res
740 740
741 741 def makefile(self, *args, **kwargs):
742 742 res = object.__getattribute__(self, '_observedcall')(
743 743 'makefile', *args, **kwargs
744 744 )
745 745
746 746 # The file object may be used for I/O. So we turn it into a
747 747 # proxy using our observer.
748 748 observer = object.__getattribute__(self, '_observer')
749 749 return makeloggingfileobject(
750 750 observer.fh,
751 751 res,
752 752 observer.name,
753 753 reads=observer.reads,
754 754 writes=observer.writes,
755 755 logdata=observer.logdata,
756 756 logdataapis=observer.logdataapis,
757 757 )
758 758
759 759 def recv(self, *args, **kwargs):
760 760 return object.__getattribute__(self, '_observedcall')(
761 761 'recv', *args, **kwargs
762 762 )
763 763
764 764 def recvfrom(self, *args, **kwargs):
765 765 return object.__getattribute__(self, '_observedcall')(
766 766 'recvfrom', *args, **kwargs
767 767 )
768 768
769 769 def recvfrom_into(self, *args, **kwargs):
770 770 return object.__getattribute__(self, '_observedcall')(
771 771 'recvfrom_into', *args, **kwargs
772 772 )
773 773
774 774 def recv_into(self, *args, **kwargs):
775 775 return object.__getattribute__(self, '_observedcall')(
776 776 'recv_into', *args, **kwargs
777 777 )
778 778
779 779 def send(self, *args, **kwargs):
780 780 return object.__getattribute__(self, '_observedcall')(
781 781 'send', *args, **kwargs
782 782 )
783 783
784 784 def sendall(self, *args, **kwargs):
785 785 return object.__getattribute__(self, '_observedcall')(
786 786 'sendall', *args, **kwargs
787 787 )
788 788
789 789 def sendto(self, *args, **kwargs):
790 790 return object.__getattribute__(self, '_observedcall')(
791 791 'sendto', *args, **kwargs
792 792 )
793 793
794 794 def setblocking(self, *args, **kwargs):
795 795 return object.__getattribute__(self, '_observedcall')(
796 796 'setblocking', *args, **kwargs
797 797 )
798 798
799 799 def settimeout(self, *args, **kwargs):
800 800 return object.__getattribute__(self, '_observedcall')(
801 801 'settimeout', *args, **kwargs
802 802 )
803 803
804 804 def gettimeout(self, *args, **kwargs):
805 805 return object.__getattribute__(self, '_observedcall')(
806 806 'gettimeout', *args, **kwargs
807 807 )
808 808
809 809 def setsockopt(self, *args, **kwargs):
810 810 return object.__getattribute__(self, '_observedcall')(
811 811 'setsockopt', *args, **kwargs
812 812 )
813 813
814 814
815 815 class baseproxyobserver:
816 816 def __init__(self, fh, name, logdata, logdataapis):
817 817 self.fh = fh
818 818 self.name = name
819 819 self.logdata = logdata
820 820 self.logdataapis = logdataapis
821 821
822 822 def _writedata(self, data):
823 823 if not self.logdata:
824 824 if self.logdataapis:
825 825 self.fh.write(b'\n')
826 826 self.fh.flush()
827 827 return
828 828
829 829 # Simple case writes all data on a single line.
830 830 if b'\n' not in data:
831 831 if self.logdataapis:
832 832 self.fh.write(b': %s\n' % stringutil.escapestr(data))
833 833 else:
834 834 self.fh.write(
835 835 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
836 836 )
837 837 self.fh.flush()
838 838 return
839 839
840 840 # Data with newlines is written to multiple lines.
841 841 if self.logdataapis:
842 842 self.fh.write(b':\n')
843 843
844 844 lines = data.splitlines(True)
845 845 for line in lines:
846 846 self.fh.write(
847 847 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
848 848 )
849 849 self.fh.flush()
850 850
851 851
852 852 class fileobjectobserver(baseproxyobserver):
853 853 """Logs file object activity."""
854 854
855 855 def __init__(
856 856 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
857 857 ):
858 858 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
859 859 self.reads = reads
860 860 self.writes = writes
861 861
862 862 def read(self, res, size=-1):
863 863 if not self.reads:
864 864 return
865 865 # Python 3 can return None from reads at EOF instead of empty strings.
866 866 if res is None:
867 867 res = b''
868 868
869 869 if size == -1 and res == b'':
870 870 # Suppress pointless read(-1) calls that return
871 871 # nothing. These happen _a lot_ on Python 3, and there
872 872 # doesn't seem to be a better workaround to have matching
873 873 # Python 2 and 3 behavior. :(
874 874 return
875 875
876 876 if self.logdataapis:
877 877 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
878 878
879 879 self._writedata(res)
880 880
881 881 def readline(self, res, limit=-1):
882 882 if not self.reads:
883 883 return
884 884
885 885 if self.logdataapis:
886 886 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
887 887
888 888 self._writedata(res)
889 889
890 890 def readinto(self, res, dest):
891 891 if not self.reads:
892 892 return
893 893
894 894 if self.logdataapis:
895 895 self.fh.write(
896 896 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
897 897 )
898 898
899 899 data = dest[0:res] if res is not None else b''
900 900
901 901 # _writedata() uses "in" operator and is confused by memoryview because
902 902 # characters are ints on Python 3.
903 903 if isinstance(data, memoryview):
904 904 data = data.tobytes()
905 905
906 906 self._writedata(data)
907 907
908 908 def write(self, res, data):
909 909 if not self.writes:
910 910 return
911 911
912 912 # Python 2 returns None from some write() calls. Python 3 (reasonably)
913 913 # returns the integer bytes written.
914 914 if res is None and data:
915 915 res = len(data)
916 916
917 917 if self.logdataapis:
918 918 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
919 919
920 920 self._writedata(data)
921 921
922 922 def flush(self, res):
923 923 if not self.writes:
924 924 return
925 925
926 926 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
927 927
928 928 # For observedbufferedinputpipe.
929 929 def bufferedread(self, res, size):
930 930 if not self.reads:
931 931 return
932 932
933 933 if self.logdataapis:
934 934 self.fh.write(
935 935 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
936 936 )
937 937
938 938 self._writedata(res)
939 939
940 940 def bufferedreadline(self, res):
941 941 if not self.reads:
942 942 return
943 943
944 944 if self.logdataapis:
945 945 self.fh.write(
946 946 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
947 947 )
948 948
949 949 self._writedata(res)
950 950
951 951
952 952 def makeloggingfileobject(
953 953 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
954 954 ):
955 955 """Turn a file object into a logging file object."""
956 956
957 957 observer = fileobjectobserver(
958 958 logh,
959 959 name,
960 960 reads=reads,
961 961 writes=writes,
962 962 logdata=logdata,
963 963 logdataapis=logdataapis,
964 964 )
965 965 return fileobjectproxy(fh, observer)
966 966
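A minimal sketch of makeloggingfileobject, assuming 'logfh' is an open binary log file:

    fobj = makeloggingfileobject(logfh, io.BytesIO(b'abcd'), b'obs',
                                 logdata=True)
    fobj.read(2)   # logfh receives b'obs> read(2) -> 2: ab\n'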
967 967
968 968 class socketobserver(baseproxyobserver):
969 969 """Logs socket activity."""
970 970
971 971 def __init__(
972 972 self,
973 973 fh,
974 974 name,
975 975 reads=True,
976 976 writes=True,
977 977 states=True,
978 978 logdata=False,
979 979 logdataapis=True,
980 980 ):
981 981 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
982 982 self.reads = reads
983 983 self.writes = writes
984 984 self.states = states
985 985
986 986 def makefile(self, res, mode=None, bufsize=None):
987 987 if not self.states:
988 988 return
989 989
990 990 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
991 991
992 992 def recv(self, res, size, flags=0):
993 993 if not self.reads:
994 994 return
995 995
996 996 if self.logdataapis:
997 997 self.fh.write(
998 998 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
999 999 )
1000 1000 self._writedata(res)
1001 1001
1002 1002 def recvfrom(self, res, size, flags=0):
1003 1003 if not self.reads:
1004 1004 return
1005 1005
1006 1006 if self.logdataapis:
1007 1007 self.fh.write(
1008 1008 b'%s> recvfrom(%d, %d) -> %d'
1009 1009 % (self.name, size, flags, len(res[0]))
1010 1010 )
1011 1011
1012 1012 self._writedata(res[0])
1013 1013
1014 1014 def recvfrom_into(self, res, buf, size, flags=0):
1015 1015 if not self.reads:
1016 1016 return
1017 1017
1018 1018 if self.logdataapis:
1019 1019 self.fh.write(
1020 1020 b'%s> recvfrom_into(%d, %d) -> %d'
1021 1021 % (self.name, size, flags, res[0])
1022 1022 )
1023 1023
1024 1024 self._writedata(buf[0 : res[0]])
1025 1025
1026 1026 def recv_into(self, res, buf, size=0, flags=0):
1027 1027 if not self.reads:
1028 1028 return
1029 1029
1030 1030 if self.logdataapis:
1031 1031 self.fh.write(
1032 1032 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1033 1033 )
1034 1034
1035 1035 self._writedata(buf[0:res])
1036 1036
1037 1037 def send(self, res, data, flags=0):
1038 1038 if not self.writes:
1039 1039 return
1040 1040
1041 1041 self.fh.write(
1042 1042 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1043 1043 )
1044 1044 self._writedata(data)
1045 1045
1046 1046 def sendall(self, res, data, flags=0):
1047 1047 if not self.writes:
1048 1048 return
1049 1049
1050 1050 if self.logdataapis:
1051 1051 # Returns None on success. So don't bother reporting return value.
1052 1052 self.fh.write(
1053 1053 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1054 1054 )
1055 1055
1056 1056 self._writedata(data)
1057 1057
1058 1058 def sendto(self, res, data, flagsoraddress, address=None):
1059 1059 if not self.writes:
1060 1060 return
1061 1061
1062 1062 if address:
1063 1063 flags = flagsoraddress
1064 1064 else:
1065 1065 flags = 0
1066 1066
1067 1067 if self.logdataapis:
1068 1068 self.fh.write(
1069 1069 b'%s> sendto(%d, %d, %r) -> %d'
1070 1070 % (self.name, len(data), flags, address, res)
1071 1071 )
1072 1072
1073 1073 self._writedata(data)
1074 1074
1075 1075 def setblocking(self, res, flag):
1076 1076 if not self.states:
1077 1077 return
1078 1078
1079 1079 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1080 1080
1081 1081 def settimeout(self, res, value):
1082 1082 if not self.states:
1083 1083 return
1084 1084
1085 1085 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1086 1086
1087 1087 def gettimeout(self, res):
1088 1088 if not self.states:
1089 1089 return
1090 1090
1091 1091 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1092 1092
1093 1093 def setsockopt(self, res, level, optname, value):
1094 1094 if not self.states:
1095 1095 return
1096 1096
1097 1097 self.fh.write(
1098 1098 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1099 1099 % (self.name, level, optname, value, res)
1100 1100 )
1101 1101
1102 1102
1103 1103 def makeloggingsocket(
1104 1104 logh,
1105 1105 fh,
1106 1106 name,
1107 1107 reads=True,
1108 1108 writes=True,
1109 1109 states=True,
1110 1110 logdata=False,
1111 1111 logdataapis=True,
1112 1112 ):
1113 1113 """Turn a socket into a logging socket."""
1114 1114
1115 1115 observer = socketobserver(
1116 1116 logh,
1117 1117 name,
1118 1118 reads=reads,
1119 1119 writes=writes,
1120 1120 states=states,
1121 1121 logdata=logdata,
1122 1122 logdataapis=logdataapis,
1123 1123 )
1124 1124 return socketproxy(fh, observer)
1125 1125
1126 1126
1127 1127 def version():
1128 1128 """Return version information if available."""
1129 1129 try:
1130 1130 from . import __version__
1131 1131
1132 1132 return __version__.version
1133 1133 except ImportError:
1134 1134 return b'unknown'
1135 1135
1136 1136
1137 1137 def versiontuple(v=None, n=4):
1138 1138 """Parses a Mercurial version string into an N-tuple.
1139 1139
1140 1140 The version string to be parsed is specified with the ``v`` argument.
1141 1141 If it isn't defined, the current Mercurial version string will be parsed.
1142 1142
1143 1143 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1144 1144 returned values:
1145 1145
1146 1146 >>> v = b'3.6.1+190-df9b73d2d444'
1147 1147 >>> versiontuple(v, 2)
1148 1148 (3, 6)
1149 1149 >>> versiontuple(v, 3)
1150 1150 (3, 6, 1)
1151 1151 >>> versiontuple(v, 4)
1152 1152 (3, 6, 1, '190-df9b73d2d444')
1153 1153
1154 1154 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1155 1155 (3, 6, 1, '190-df9b73d2d444+20151118')
1156 1156
1157 1157 >>> v = b'3.6'
1158 1158 >>> versiontuple(v, 2)
1159 1159 (3, 6)
1160 1160 >>> versiontuple(v, 3)
1161 1161 (3, 6, None)
1162 1162 >>> versiontuple(v, 4)
1163 1163 (3, 6, None, None)
1164 1164
1165 1165 >>> v = b'3.9-rc'
1166 1166 >>> versiontuple(v, 2)
1167 1167 (3, 9)
1168 1168 >>> versiontuple(v, 3)
1169 1169 (3, 9, None)
1170 1170 >>> versiontuple(v, 4)
1171 1171 (3, 9, None, 'rc')
1172 1172
1173 1173 >>> v = b'3.9-rc+2-02a8fea4289b'
1174 1174 >>> versiontuple(v, 2)
1175 1175 (3, 9)
1176 1176 >>> versiontuple(v, 3)
1177 1177 (3, 9, None)
1178 1178 >>> versiontuple(v, 4)
1179 1179 (3, 9, None, 'rc+2-02a8fea4289b')
1180 1180
1181 1181 >>> versiontuple(b'4.6rc0')
1182 1182 (4, 6, None, 'rc0')
1183 1183 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1184 1184 (4, 6, None, 'rc0+12-425d55e54f98')
1185 1185 >>> versiontuple(b'.1.2.3')
1186 1186 (None, None, None, '.1.2.3')
1187 1187 >>> versiontuple(b'12.34..5')
1188 1188 (12, 34, None, '..5')
1189 1189 >>> versiontuple(b'1.2.3.4.5.6')
1190 1190 (1, 2, 3, '.4.5.6')
1191 1191 """
1192 1192 if not v:
1193 1193 v = version()
1194 1194 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1195 1195 if not m:
1196 1196 vparts, extra = b'', v
1197 1197 elif m.group(2):
1198 1198 vparts, extra = m.groups()
1199 1199 else:
1200 1200 vparts, extra = m.group(1), None
1201 1201
1202 1202 assert vparts is not None # help pytype
1203 1203
1204 1204 vints = []
1205 1205 for i in vparts.split(b'.'):
1206 1206 try:
1207 1207 vints.append(int(i))
1208 1208 except ValueError:
1209 1209 break
1210 1210 # (3, 6) -> (3, 6, None)
1211 1211 while len(vints) < 3:
1212 1212 vints.append(None)
1213 1213
1214 1214 if n == 2:
1215 1215 return (vints[0], vints[1])
1216 1216 if n == 3:
1217 1217 return (vints[0], vints[1], vints[2])
1218 1218 if n == 4:
1219 1219 return (vints[0], vints[1], vints[2], extra)
1220 1220
1221 1221 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1222 1222
1223 1223
1224 1224 def cachefunc(func):
1225 1225 '''cache the result of function calls'''
1226 1226 # XXX doesn't handle keyword args
1227 1227 if func.__code__.co_argcount == 0:
1228 1228 listcache = []
1229 1229
1230 1230 def f():
1231 1231 if len(listcache) == 0:
1232 1232 listcache.append(func())
1233 1233 return listcache[0]
1234 1234
1235 1235 return f
1236 1236 cache = {}
1237 1237 if func.__code__.co_argcount == 1:
1238 1238 # we gain a small amount of time because
1239 1239 # we don't need to pack/unpack the list
1240 1240 def f(arg):
1241 1241 if arg not in cache:
1242 1242 cache[arg] = func(arg)
1243 1243 return cache[arg]
1244 1244
1245 1245 else:
1246 1246
1247 1247 def f(*args):
1248 1248 if args not in cache:
1249 1249 cache[args] = func(*args)
1250 1250 return cache[args]
1251 1251
1252 1252 return f
1253 1253
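A minimal sketch of cachefunc memoization (the wrapped function is hypothetical):

    calls = []
    def double(x):
        calls.append(x)
        return x * 2
    cached = cachefunc(double)
    assert cached(2) == 4 and cached(2) == 4
    assert calls == [2]   # the second call was served from the cache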
1254 1254
1255 1255 class cow:
1256 1256 """helper class to make copy-on-write easier
1257 1257
1258 1258 Call preparewrite before doing any writes.
1259 1259 """
1260 1260
1261 1261 def preparewrite(self):
1262 1262 """call this before writes, return self or a copied new object"""
1263 1263 if getattr(self, '_copied', 0):
1264 1264 self._copied -= 1
1265 1265 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1266 1266 return self.__class__(self) # pytype: disable=wrong-arg-count
1267 1267 return self
1268 1268
1269 1269 def copy(self):
1270 1270 """always do a cheap copy"""
1271 1271 self._copied = getattr(self, '_copied', 0) + 1
1272 1272 return self
1273 1273
1274 1274
1275 1275 class sortdict(collections.OrderedDict):
1276 1276 """a simple sorted dictionary
1277 1277
1278 1278 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1279 1279 >>> d2 = d1.copy()
1280 >>> d2
1281 sortdict([('a', 0), ('b', 1)])
1280 >>> list(d2.items())
1281 [('a', 0), ('b', 1)]
1282 1282 >>> d2.update([(b'a', 2)])
1283 1283 >>> list(d2.keys()) # should still be in last-set order
1284 1284 ['b', 'a']
1285 1285 >>> d1.insert(1, b'a.5', 0.5)
1286 >>> d1
1287 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1286 >>> list(d1.items())
1287 [('a', 0), ('a.5', 0.5), ('b', 1)]
1288 1288 """
1289 1289
1290 1290 def __setitem__(self, key, value):
1291 1291 if key in self:
1292 1292 del self[key]
1293 1293 super(sortdict, self).__setitem__(key, value)
1294 1294
1295 1295 if pycompat.ispypy:
1296 1296 # __setitem__() isn't called as of PyPy 5.8.0
1297 1297 def update(self, src, **f):
1298 1298 if isinstance(src, dict):
1299 1299 src = src.items()
1300 1300 for k, v in src:
1301 1301 self[k] = v
1302 1302 for k in f:
1303 1303 self[k] = f[k]
1304 1304
1305 1305 def insert(self, position, key, value):
1306 1306 for (i, (k, v)) in enumerate(list(self.items())):
1307 1307 if i == position:
1308 1308 self[key] = value
1309 1309 if i >= position:
1310 1310 del self[k]
1311 1311 self[k] = v
1312 1312
1313 1313
1314 1314 class cowdict(cow, dict):
1315 1315 """copy-on-write dict
1316 1316
1317 1317 Be sure to call d = d.preparewrite() before writing to d.
1318 1318
1319 1319 >>> a = cowdict()
1320 1320 >>> a is a.preparewrite()
1321 1321 True
1322 1322 >>> b = a.copy()
1323 1323 >>> b is a
1324 1324 True
1325 1325 >>> c = b.copy()
1326 1326 >>> c is a
1327 1327 True
1328 1328 >>> a = a.preparewrite()
1329 1329 >>> b is a
1330 1330 False
1331 1331 >>> a is a.preparewrite()
1332 1332 True
1333 1333 >>> c = c.preparewrite()
1334 1334 >>> b is c
1335 1335 False
1336 1336 >>> b is b.preparewrite()
1337 1337 True
1338 1338 """
1339 1339
1340 1340
1341 1341 class cowsortdict(cow, sortdict):
1342 1342 """copy-on-write sortdict
1343 1343
1344 1344 Be sure to call d = d.preparewrite() before writing to d.
1345 1345 """
1346 1346
1347 1347
1348 1348 class transactional: # pytype: disable=ignored-metaclass
1349 1349 """Base class for making a transactional type into a context manager."""
1350 1350
1351 1351 __metaclass__ = abc.ABCMeta
1352 1352
1353 1353 @abc.abstractmethod
1354 1354 def close(self):
1355 1355 """Successfully closes the transaction."""
1356 1356
1357 1357 @abc.abstractmethod
1358 1358 def release(self):
1359 1359 """Marks the end of the transaction.
1360 1360
1361 1361 If the transaction has not been closed, it will be aborted.
1362 1362 """
1363 1363
1364 1364 def __enter__(self):
1365 1365 return self
1366 1366
1367 1367 def __exit__(self, exc_type, exc_val, exc_tb):
1368 1368 try:
1369 1369 if exc_type is None:
1370 1370 self.close()
1371 1371 finally:
1372 1372 self.release()
1373 1373
1374 1374
1375 1375 @contextlib.contextmanager
1376 1376 def acceptintervention(tr=None):
1377 1377 """A context manager that closes the transaction on InterventionRequired
1378 1378
1379 1379 If no transaction was provided, this simply runs the body and returns
1380 1380 """
1381 1381 if not tr:
1382 1382 yield
1383 1383 return
1384 1384 try:
1385 1385 yield
1386 1386 tr.close()
1387 1387 except error.InterventionRequired:
1388 1388 tr.close()
1389 1389 raise
1390 1390 finally:
1391 1391 tr.release()
1392 1392
1393 1393
1394 1394 @contextlib.contextmanager
1395 1395 def nullcontextmanager(enter_result=None):
1396 1396 yield enter_result
1397 1397
1398 1398
1399 1399 class _lrucachenode:
1400 1400 """A node in a doubly linked list.
1401 1401
1402 1402 Holds a reference to nodes on either side as well as a key-value
1403 1403 pair for the dictionary entry.
1404 1404 """
1405 1405
1406 1406 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1407 1407
1408 1408 def __init__(self):
1409 1409 self.next = self
1410 1410 self.prev = self
1411 1411
1412 1412 self.key = _notset
1413 1413 self.value = None
1414 1414 self.cost = 0
1415 1415
1416 1416 def markempty(self):
1417 1417 """Mark the node as emptied."""
1418 1418 self.key = _notset
1419 1419 self.value = None
1420 1420 self.cost = 0
1421 1421
1422 1422
1423 1423 class lrucachedict:
1424 1424 """Dict that caches most recent accesses and sets.
1425 1425
1426 1426 The dict consists of an actual backing dict - indexed by original
1427 1427 key - and a doubly linked circular list defining the order of entries in
1428 1428 the cache.
1429 1429
1430 1430 The head node is the newest entry in the cache. If the cache is full,
1431 1431 we recycle head.prev and make it the new head. Cache accesses result in
1432 1432 the node being moved to before the existing head and being marked as the
1433 1433 new head node.
1434 1434
1435 1435 Items in the cache can be inserted with an optional "cost" value. This is
1436 1436 simply an integer that is specified by the caller. The cache can be queried
1437 1437 for the total cost of all items presently in the cache.
1438 1438
1439 1439 The cache can also define a maximum cost. If a cache insertion would
1440 1440 cause the total cost of the cache to go beyond the maximum cost limit,
1441 1441 nodes will be evicted to make room for the new node. This can be used
1442 1442 to e.g. set a max memory limit and associate an estimated bytes size
1443 1443 cost to each item in the cache. By default, no maximum cost is enforced.
1444 1444 """
1445 1445
1446 1446 def __init__(self, max, maxcost=0):
1447 1447 self._cache = {}
1448 1448
1449 1449 self._head = _lrucachenode()
1450 1450 self._size = 1
1451 1451 self.capacity = max
1452 1452 self.totalcost = 0
1453 1453 self.maxcost = maxcost
1454 1454
1455 1455 def __len__(self):
1456 1456 return len(self._cache)
1457 1457
1458 1458 def __contains__(self, k):
1459 1459 return k in self._cache
1460 1460
1461 1461 def __iter__(self):
1462 1462 # We don't have to iterate in cache order, but why not.
1463 1463 n = self._head
1464 1464 for i in range(len(self._cache)):
1465 1465 yield n.key
1466 1466 n = n.next
1467 1467
1468 1468 def __getitem__(self, k):
1469 1469 node = self._cache[k]
1470 1470 self._movetohead(node)
1471 1471 return node.value
1472 1472
1473 1473 def insert(self, k, v, cost=0):
1474 1474 """Insert a new item in the cache with optional cost value."""
1475 1475 node = self._cache.get(k)
1476 1476 # Replace existing value and mark as newest.
1477 1477 if node is not None:
1478 1478 self.totalcost -= node.cost
1479 1479 node.value = v
1480 1480 node.cost = cost
1481 1481 self.totalcost += cost
1482 1482 self._movetohead(node)
1483 1483
1484 1484 if self.maxcost:
1485 1485 self._enforcecostlimit()
1486 1486
1487 1487 return
1488 1488
1489 1489 if self._size < self.capacity:
1490 1490 node = self._addcapacity()
1491 1491 else:
1492 1492 # Grab the last/oldest item.
1493 1493 node = self._head.prev
1494 1494
1495 1495 # At capacity. Kill the old entry.
1496 1496 if node.key is not _notset:
1497 1497 self.totalcost -= node.cost
1498 1498 del self._cache[node.key]
1499 1499
1500 1500 node.key = k
1501 1501 node.value = v
1502 1502 node.cost = cost
1503 1503 self.totalcost += cost
1504 1504 self._cache[k] = node
1505 1505 # And mark it as newest entry. No need to adjust order since it
1506 1506 # is already self._head.prev.
1507 1507 self._head = node
1508 1508
1509 1509 if self.maxcost:
1510 1510 self._enforcecostlimit()
1511 1511
1512 1512 def __setitem__(self, k, v):
1513 1513 self.insert(k, v)
1514 1514
1515 1515 def __delitem__(self, k):
1516 1516 self.pop(k)
1517 1517
1518 1518 def pop(self, k, default=_notset):
1519 1519 try:
1520 1520 node = self._cache.pop(k)
1521 1521 except KeyError:
1522 1522 if default is _notset:
1523 1523 raise
1524 1524 return default
1525 1525
1526 1526 assert node is not None # help pytype
1527 1527 value = node.value
1528 1528 self.totalcost -= node.cost
1529 1529 node.markempty()
1530 1530
1531 1531 # Temporarily mark as newest item before re-adjusting head to make
1532 1532 # this node the oldest item.
1533 1533 self._movetohead(node)
1534 1534 self._head = node.next
1535 1535
1536 1536 return value
1537 1537
1538 1538 # Additional dict methods.
1539 1539
1540 1540 def get(self, k, default=None):
1541 1541 try:
1542 1542 return self.__getitem__(k)
1543 1543 except KeyError:
1544 1544 return default
1545 1545
1546 1546 def peek(self, k, default=_notset):
1547 1547 """Get the specified item without moving it to the head
1548 1548
1549 1549 Unlike get(), this doesn't mutate the internal state. But be aware
1550 1550 that this does not make peek() thread safe.
1551 1551 """
1552 1552 try:
1553 1553 node = self._cache[k]
1554 1554 assert node is not None # help pytype
1555 1555 return node.value
1556 1556 except KeyError:
1557 1557 if default is _notset:
1558 1558 raise
1559 1559 return default
1560 1560
1561 1561 def clear(self):
1562 1562 n = self._head
1563 1563 while n.key is not _notset:
1564 1564 self.totalcost -= n.cost
1565 1565 n.markempty()
1566 1566 n = n.next
1567 1567
1568 1568 self._cache.clear()
1569 1569
1570 1570 def copy(self, capacity=None, maxcost=0):
1571 1571 """Create a new cache as a copy of the current one.
1572 1572
1573 1573 By default, the new cache has the same capacity as the existing one.
1574 1574 But, the cache capacity can be changed as part of performing the
1575 1575 copy.
1576 1576
1577 1577 Items in the copy have an insertion/access order matching this
1578 1578 instance.
1579 1579 """
1580 1580
1581 1581 capacity = capacity or self.capacity
1582 1582 maxcost = maxcost or self.maxcost
1583 1583 result = lrucachedict(capacity, maxcost=maxcost)
1584 1584
1585 1585 # We copy entries by iterating in oldest-to-newest order so the copy
1586 1586 # has the correct ordering.
1587 1587
1588 1588 # Find the first non-empty entry.
1589 1589 n = self._head.prev
1590 1590 while n.key is _notset and n is not self._head:
1591 1591 n = n.prev
1592 1592
1593 1593 # We could potentially skip the first N items when decreasing capacity.
1594 1594 # But let's keep it simple unless it is a performance problem.
1595 1595 for i in range(len(self._cache)):
1596 1596 result.insert(n.key, n.value, cost=n.cost)
1597 1597 n = n.prev
1598 1598
1599 1599 return result
1600 1600
1601 1601 def popoldest(self):
1602 1602 """Remove the oldest item from the cache.
1603 1603
1604 1604 Returns the (key, value) describing the removed cache entry.
1605 1605 """
1606 1606 if not self._cache:
1607 1607 return
1608 1608
1609 1609 # Walk the linked list backwards starting at tail node until we hit
1610 1610 # a non-empty node.
1611 1611 n = self._head.prev
1612 1612
1613 1613 assert n is not None # help pytype
1614 1614
1615 1615 while n.key is _notset:
1616 1616 n = n.prev
1617 1617
1618 1618 assert n is not None # help pytype
1619 1619
1620 1620 key, value = n.key, n.value
1621 1621
1622 1622 # And remove it from the cache and mark it as empty.
1623 1623 del self._cache[n.key]
1624 1624 self.totalcost -= n.cost
1625 1625 n.markempty()
1626 1626
1627 1627 return key, value
1628 1628
1629 1629 def _movetohead(self, node):
1630 1630 """Mark a node as the newest, making it the new head.
1631 1631
1632 1632 When a node is accessed, it becomes the freshest entry in the LRU
1633 1633 list, which is denoted by self._head.
1634 1634
1635 1635 Visually, let's make ``N`` the new head node (* denotes head):
1636 1636
1637 1637 previous/oldest <-> head <-> next/next newest
1638 1638
1639 1639 ----<->--- A* ---<->-----
1640 1640 | |
1641 1641 E <-> D <-> N <-> C <-> B
1642 1642
1643 1643 To:
1644 1644
1645 1645 ----<->--- N* ---<->-----
1646 1646 | |
1647 1647 E <-> D <-> C <-> B <-> A
1648 1648
1649 1649 This requires the following moves:
1650 1650
1651 1651 C.next = D (node.prev.next = node.next)
1652 1652 D.prev = C (node.next.prev = node.prev)
1653 1653 E.next = N (head.prev.next = node)
1654 1654 N.prev = E (node.prev = head.prev)
1655 1655 N.next = A (node.next = head)
1656 1656 A.prev = N (head.prev = node)
1657 1657 """
1658 1658 head = self._head
1659 1659 # C.next = D
1660 1660 node.prev.next = node.next
1661 1661 # D.prev = C
1662 1662 node.next.prev = node.prev
1663 1663 # N.prev = E
1664 1664 node.prev = head.prev
1665 1665 # N.next = A
1666 1666 # It is tempting to do just "head" here, however if node is
1667 1667 # adjacent to head, this will do bad things.
1668 1668 node.next = head.prev.next
1669 1669 # E.next = N
1670 1670 node.next.prev = node
1671 1671 # A.prev = N
1672 1672 node.prev.next = node
1673 1673
1674 1674 self._head = node
1675 1675
1676 1676 def _addcapacity(self):
1677 1677 """Add a node to the circular linked list.
1678 1678
1679 1679 The new node is inserted before the head node.
1680 1680 """
1681 1681 head = self._head
1682 1682 node = _lrucachenode()
1683 1683 head.prev.next = node
1684 1684 node.prev = head.prev
1685 1685 node.next = head
1686 1686 head.prev = node
1687 1687 self._size += 1
1688 1688 return node
1689 1689
1690 1690 def _enforcecostlimit(self):
1691 1691 # This should run after an insertion. It should only be called if total
1692 1692 # cost limits are being enforced.
1693 1693 # The most recently inserted node is never evicted.
1694 1694 if len(self) <= 1 or self.totalcost <= self.maxcost:
1695 1695 return
1696 1696
1697 1697 # This is logically equivalent to calling popoldest() until we
1698 1698 # free up enough cost. We don't do that since popoldest() needs
1699 1699 # to walk the linked list and doing this in a loop would be
1700 1700 # quadratic. So we find the first non-empty node and then
1701 1701 # walk nodes until we free up enough capacity.
1702 1702 #
1703 1703 # If we only removed the minimum number of nodes to free enough
1704 1704 # cost at insert time, chances are high that the next insert would
1705 1705 # also require pruning. This would effectively constitute quadratic
1706 1706 # behavior for insert-heavy workloads. To mitigate this, we set a
1707 1707 # target cost that is a percentage of the max cost. This will tend
1708 1708 # to free more nodes when the high water mark is reached, which
1709 1709 # lowers the chances of needing to prune on the subsequent insert.
1710 1710 targetcost = int(self.maxcost * 0.75)
1711 1711
1712 1712 n = self._head.prev
1713 1713 while n.key is _notset:
1714 1714 n = n.prev
1715 1715
1716 1716 while len(self) > 1 and self.totalcost > targetcost:
1717 1717 del self._cache[n.key]
1718 1718 self.totalcost -= n.cost
1719 1719 n.markempty()
1720 1720 n = n.prev
1721 1721
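A minimal sketch of cost-based eviction in lrucachedict:

    d = lrucachedict(4, maxcost=10)
    d.insert(b'a', b'value', cost=6)
    d.insert(b'b', b'value', cost=6)   # total cost 12 > 10: b'a' is evicted
    assert b'a' not in d and b'b' in d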
1722 1722
1723 1723 def lrucachefunc(func):
1724 1724 '''cache most recent results of function calls'''
1725 1725 cache = {}
1726 1726 order = collections.deque()
1727 1727 if func.__code__.co_argcount == 1:
1728 1728
1729 1729 def f(arg):
1730 1730 if arg not in cache:
1731 1731 if len(cache) > 20:
1732 1732 del cache[order.popleft()]
1733 1733 cache[arg] = func(arg)
1734 1734 else:
1735 1735 order.remove(arg)
1736 1736 order.append(arg)
1737 1737 return cache[arg]
1738 1738
1739 1739 else:
1740 1740
1741 1741 def f(*args):
1742 1742 if args not in cache:
1743 1743 if len(cache) > 20:
1744 1744 del cache[order.popleft()]
1745 1745 cache[args] = func(*args)
1746 1746 else:
1747 1747 order.remove(args)
1748 1748 order.append(args)
1749 1749 return cache[args]
1750 1750
1751 1751 return f
1752 1752
1753 1753
1754 1754 class propertycache:
1755 1755 def __init__(self, func):
1756 1756 self.func = func
1757 1757 self.name = func.__name__
1758 1758
1759 1759 def __get__(self, obj, type=None):
1760 1760 result = self.func(obj)
1761 1761 self.cachevalue(obj, result)
1762 1762 return result
1763 1763
1764 1764 def cachevalue(self, obj, value):
1765 1765 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1766 1766 obj.__dict__[self.name] = value
1767 1767
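A minimal sketch of propertycache (the class is hypothetical):

    calls = []
    class thing:
        @propertycache
        def expensive(self):
            calls.append(1)
            return 42
    t = thing()
    assert t.expensive == 42 and t.expensive == 42
    assert len(calls) == 1   # second access hit t.__dict__, not the function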
1768 1768
1769 1769 def clearcachedproperty(obj, prop):
1770 1770 '''clear a cached property value, if one has been set'''
1771 1771 prop = pycompat.sysstr(prop)
1772 1772 if prop in obj.__dict__:
1773 1773 del obj.__dict__[prop]
1774 1774
1775 1775
1776 1776 def increasingchunks(source, min=1024, max=65536):
1777 1777 """return no less than min bytes per chunk while data remains,
1778 1778 doubling min after each chunk until it reaches max"""
1779 1779
1780 1780 def log2(x):
1781 1781 if not x:
1782 1782 return 0
1783 1783 i = 0
1784 1784 while x:
1785 1785 x >>= 1
1786 1786 i += 1
1787 1787 return i - 1
1788 1788
1789 1789 buf = []
1790 1790 blen = 0
1791 1791 for chunk in source:
1792 1792 buf.append(chunk)
1793 1793 blen += len(chunk)
1794 1794 if blen >= min:
1795 1795 if min < max:
1796 1796 min = min << 1
1797 1797 nmin = 1 << log2(blen)
1798 1798 if nmin > min:
1799 1799 min = nmin
1800 1800 if min > max:
1801 1801 min = max
1802 1802 yield b''.join(buf)
1803 1803 blen = 0
1804 1804 buf = []
1805 1805 if buf:
1806 1806 yield b''.join(buf)
1807 1807
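A minimal sketch of how increasingchunks coalesces many small chunks:

    chunks = [b'x' * 100] * 100   # 10000 bytes arriving in 100 pieces
    out = list(increasingchunks(iter(chunks)))
    # every chunk except possibly the last is at least the starting
    # minimum of 1024 bytes, and the minimum grows as data flows
    assert all(len(c) >= 1024 for c in out[:-1])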
1808 1808
1809 1809 def always(fn):
1810 1810 return True
1811 1811
1812 1812
1813 1813 def never(fn):
1814 1814 return False
1815 1815
1816 1816
1817 1817 def nogc(func):
1818 1818 """disable garbage collector
1819 1819
1820 1820 Python's garbage collector triggers a GC each time a certain number of
1821 1821 container objects (the number being defined by gc.get_threshold()) are
1822 1822 allocated even when marked not to be tracked by the collector. Tracking has
1823 1823 no effect on when GCs are triggered, only on what objects the GC looks
1824 1824 into. As a workaround, disable GC while building complex (huge)
1825 1825 containers.
1826 1826
1827 1827 This garbage collector issue has been fixed in 2.7, but it still affects
1828 1828 CPython's performance.
1829 1829 """
1830 1830
1831 1831 def wrapper(*args, **kwargs):
1832 1832 gcenabled = gc.isenabled()
1833 1833 gc.disable()
1834 1834 try:
1835 1835 return func(*args, **kwargs)
1836 1836 finally:
1837 1837 if gcenabled:
1838 1838 gc.enable()
1839 1839
1840 1840 return wrapper
1841 1841
1842 1842
1843 1843 if pycompat.ispypy:
1844 1844 # PyPy runs slower with gc disabled
1845 1845 nogc = lambda x: x
1846 1846
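# A nogc sketch (illustrative; the builder below is hypothetical): wrapping a
# function that allocates huge numbers of container objects keeps the cyclic
# GC from repeatedly scanning them while the structure is built.
@nogc
def _buildhugemapping_example(n):
    return {i: [i] for i in range(n)}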
1847 1847
1848 1848 def pathto(root, n1, n2):
1849 1849 # type: (bytes, bytes, bytes) -> bytes
1850 1850 """return the relative path from one place to another.
1851 1851 root should use os.sep to separate directories
1852 1852 n1 should use os.sep to separate directories
1853 1853 n2 should use "/" to separate directories
1854 1854 returns an os.sep-separated path.
1855 1855
1856 1856 If n1 is a relative path, it's assumed it's
1857 1857 relative to root.
1858 1858 n2 should always be relative to root.
1859 1859 """
1860 1860 if not n1:
1861 1861 return localpath(n2)
1862 1862 if os.path.isabs(n1):
1863 1863 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1864 1864 return os.path.join(root, localpath(n2))
1865 1865 n2 = b'/'.join((pconvert(root), n2))
1866 1866 a, b = splitpath(n1), n2.split(b'/')
1867 1867 a.reverse()
1868 1868 b.reverse()
1869 1869 while a and b and a[-1] == b[-1]:
1870 1870 a.pop()
1871 1871 b.pop()
1872 1872 b.reverse()
1873 1873 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1874 1874
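# An illustrative pathto() call (the paths are hypothetical; the stated
# result assumes a POSIX os.sep of '/'):
def _pathto_example():
    # climbs out of a/b, then descends into c/d: b'../../c/d' on POSIX
    return pathto(b'/repo', b'/repo/a/b', b'c/d')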
1875 1875
1876 1876 def checksignature(func, depth=1):
1877 1877 '''wrap a function with code to check for calling errors'''
1878 1878
1879 1879 def check(*args, **kwargs):
1880 1880 try:
1881 1881 return func(*args, **kwargs)
1882 1882 except TypeError:
1883 1883 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1884 1884 raise error.SignatureError
1885 1885 raise
1886 1886
1887 1887 return check
1888 1888
1889 1889
1890 1890 # a whitelist of known filesystems where hardlinks work reliably
1891 1891 _hardlinkfswhitelist = {
1892 1892 b'apfs',
1893 1893 b'btrfs',
1894 1894 b'ext2',
1895 1895 b'ext3',
1896 1896 b'ext4',
1897 1897 b'hfs',
1898 1898 b'jfs',
1899 1899 b'NTFS',
1900 1900 b'reiserfs',
1901 1901 b'tmpfs',
1902 1902 b'ufs',
1903 1903 b'xfs',
1904 1904 b'zfs',
1905 1905 }
1906 1906
1907 1907
1908 1908 def copyfile(
1909 1909 src,
1910 1910 dest,
1911 1911 hardlink=False,
1912 1912 copystat=False,
1913 1913 checkambig=False,
1914 1914 nb_bytes=None,
1915 1915 no_hardlink_cb=None,
1916 1916 check_fs_hardlink=True,
1917 1917 ):
1918 1918 """copy a file, preserving mode and optionally other stat info like
1919 1919 atime/mtime
1920 1920
1921 1921 checkambig argument is used with filestat, and is useful only if
1922 1922 destination file is guarded by any lock (e.g. repo.lock or
1923 1923 repo.wlock).
1924 1924
1925 1925 copystat and checkambig should be exclusive.
1926 1926
1927 1927 nb_bytes: if set, only copy the first `nb_bytes` of the source file.
1928 1928 """
1929 1929 assert not (copystat and checkambig)
1930 1930 oldstat = None
1931 1931 if os.path.lexists(dest):
1932 1932 if checkambig:
1933 1933 oldstat = filestat.frompath(dest)  # checkambig was just tested above
1934 1934 unlink(dest)
1935 1935 if hardlink and check_fs_hardlink:
1936 1936 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1937 1937 # unless we are confident that dest is on a whitelisted filesystem.
1938 1938 try:
1939 1939 fstype = getfstype(os.path.dirname(dest))
1940 1940 except OSError:
1941 1941 fstype = None
1942 1942 if fstype not in _hardlinkfswhitelist:
1943 1943 if no_hardlink_cb is not None:
1944 1944 no_hardlink_cb()
1945 1945 hardlink = False
1946 1946 if hardlink:
1947 1947 try:
1948 1948 oslink(src, dest)
1949 1949 if nb_bytes is not None:
1950 1950 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1951 1951 raise error.ProgrammingError(m)
1952 1952 return
1953 1953 except (IOError, OSError) as exc:
1954 1954 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1955 1955 no_hardlink_cb()
1956 1956 # fall back to normal copy
1957 1957 if os.path.islink(src):
1958 1958 os.symlink(os.readlink(src), dest)
1959 1959 # copytime is ignored for symlinks, but in general copytime isn't needed
1960 1960 # for them anyway
1961 1961 if nb_bytes is not None:
1962 1962 m = "cannot use `nb_bytes` on a symlink"
1963 1963 raise error.ProgrammingError(m)
1964 1964 else:
1965 1965 try:
1966 1966 shutil.copyfile(src, dest)
1967 1967 if copystat:
1968 1968 # copystat also copies mode
1969 1969 shutil.copystat(src, dest)
1970 1970 else:
1971 1971 shutil.copymode(src, dest)
1972 1972 if oldstat and oldstat.stat:
1973 1973 newstat = filestat.frompath(dest)
1974 1974 if newstat.isambig(oldstat):
1975 1975 # stat of copied file is ambiguous to original one
1976 1976 advanced = (
1977 1977 oldstat.stat[stat.ST_MTIME] + 1
1978 1978 ) & 0x7FFFFFFF
1979 1979 os.utime(dest, (advanced, advanced))
1980 1980 # We could do something smarter using `copy_file_range` call or similar
1981 1981 if nb_bytes is not None:
1982 1982 with open(dest, mode='r+') as f:
1983 1983 f.truncate(nb_bytes)
1984 1984 except shutil.Error as inst:
1985 1985 raise error.Abort(stringutil.forcebytestr(inst))
1986 1986
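# A copyfile() usage sketch (illustrative; the paths are hypothetical). A
# hardlink request is only a hint: it is refused on non-whitelisted
# filesystems and falls back to a plain copy if linking fails.
def _copyfile_example(src=b'/tmp/src', dest=b'/tmp/dest'):
    def note():
        pass  # invoked when the hardlink request is downgraded to a copy

    copyfile(src, dest, hardlink=True, no_hardlink_cb=note)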
1987 1987
1988 1988 def copyfiles(src, dst, hardlink=None, progress=None):
1989 1989 """Copy a directory tree using hardlinks if possible."""
1990 1990 num = 0
1991 1991
1992 1992 def settopic():
1993 1993 if progress:
1994 1994 progress.topic = _(b'linking') if hardlink else _(b'copying')
1995 1995
1996 1996 if os.path.isdir(src):
1997 1997 if hardlink is None:
1998 1998 hardlink = (
1999 1999 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
2000 2000 )
2001 2001 settopic()
2002 2002 os.mkdir(dst)
2003 2003 for name, kind in listdir(src):
2004 2004 srcname = os.path.join(src, name)
2005 2005 dstname = os.path.join(dst, name)
2006 2006 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2007 2007 num += n
2008 2008 else:
2009 2009 if hardlink is None:
2010 2010 hardlink = (
2011 2011 os.stat(os.path.dirname(src)).st_dev
2012 2012 == os.stat(os.path.dirname(dst)).st_dev
2013 2013 )
2014 2014 settopic()
2015 2015
2016 2016 if hardlink:
2017 2017 try:
2018 2018 oslink(src, dst)
2019 2019 except (IOError, OSError) as exc:
2020 2020 if exc.errno != errno.EEXIST:
2021 2021 hardlink = False
2022 2022 # XXX maybe try to relink if the file exists?
2023 2023 shutil.copy(src, dst)
2024 2024 else:
2025 2025 shutil.copy(src, dst)
2026 2026 num += 1
2027 2027 if progress:
2028 2028 progress.increment()
2029 2029
2030 2030 return hardlink, num
2031 2031
2032 2032
2033 2033 _winreservednames = {
2034 2034 b'con',
2035 2035 b'prn',
2036 2036 b'aux',
2037 2037 b'nul',
2038 2038 b'com1',
2039 2039 b'com2',
2040 2040 b'com3',
2041 2041 b'com4',
2042 2042 b'com5',
2043 2043 b'com6',
2044 2044 b'com7',
2045 2045 b'com8',
2046 2046 b'com9',
2047 2047 b'lpt1',
2048 2048 b'lpt2',
2049 2049 b'lpt3',
2050 2050 b'lpt4',
2051 2051 b'lpt5',
2052 2052 b'lpt6',
2053 2053 b'lpt7',
2054 2054 b'lpt8',
2055 2055 b'lpt9',
2056 2056 }
2057 2057 _winreservedchars = b':*?"<>|'
2058 2058
2059 2059
2060 2060 def checkwinfilename(path):
2061 2061 # type: (bytes) -> Optional[bytes]
2062 2062 r"""Check that the base-relative path is a valid filename on Windows.
2063 2063 Returns None if the path is ok, or a UI string describing the problem.
2064 2064
2065 2065 >>> checkwinfilename(b"just/a/normal/path")
2066 2066 >>> checkwinfilename(b"foo/bar/con.xml")
2067 2067 "filename contains 'con', which is reserved on Windows"
2068 2068 >>> checkwinfilename(b"foo/con.xml/bar")
2069 2069 "filename contains 'con', which is reserved on Windows"
2070 2070 >>> checkwinfilename(b"foo/bar/xml.con")
2071 2071 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2072 2072 "filename contains 'AUX', which is reserved on Windows"
2073 2073 >>> checkwinfilename(b"foo/bar/bla:.txt")
2074 2074 "filename contains ':', which is reserved on Windows"
2075 2075 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2076 2076 "filename contains '\\x07', which is invalid on Windows"
2077 2077 >>> checkwinfilename(b"foo/bar/bla ")
2078 2078 "filename ends with ' ', which is not allowed on Windows"
2079 2079 >>> checkwinfilename(b"../bar")
2080 2080 >>> checkwinfilename(b"foo\\")
2081 2081 "filename ends with '\\', which is invalid on Windows"
2082 2082 >>> checkwinfilename(b"foo\\/bar")
2083 2083 "directory name ends with '\\', which is invalid on Windows"
2084 2084 """
2085 2085 if path.endswith(b'\\'):
2086 2086 return _(b"filename ends with '\\', which is invalid on Windows")
2087 2087 if b'\\/' in path:
2088 2088 return _(b"directory name ends with '\\', which is invalid on Windows")
2089 2089 for n in path.replace(b'\\', b'/').split(b'/'):
2090 2090 if not n:
2091 2091 continue
2092 2092 for c in _filenamebytestr(n):
2093 2093 if c in _winreservedchars:
2094 2094 return (
2095 2095 _(
2096 2096 b"filename contains '%s', which is reserved "
2097 2097 b"on Windows"
2098 2098 )
2099 2099 % c
2100 2100 )
2101 2101 if ord(c) <= 31:
2102 2102 return _(
2103 2103 b"filename contains '%s', which is invalid on Windows"
2104 2104 ) % stringutil.escapestr(c)
2105 2105 base = n.split(b'.')[0]
2106 2106 if base and base.lower() in _winreservednames:
2107 2107 return (
2108 2108 _(b"filename contains '%s', which is reserved on Windows")
2109 2109 % base
2110 2110 )
2111 2111 t = n[-1:]
2112 2112 if t in b'. ' and n not in b'..':
2113 2113 return (
2114 2114 _(
2115 2115 b"filename ends with '%s', which is not allowed "
2116 2116 b"on Windows"
2117 2117 )
2118 2118 % t
2119 2119 )
2120 2120
2121 2121
2122 2122 timer = getattr(time, "perf_counter", None)
2123 2123
2124 2124 if pycompat.iswindows:
2125 2125 checkosfilename = checkwinfilename
2126 2126 if not timer:
2127 2127 timer = time.clock
2128 2128 else:
2129 2129 # mercurial.windows doesn't have platform.checkosfilename
2130 2130 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2131 2131 if not timer:
2132 2132 timer = time.time
2133 2133
2134 2134
2135 2135 def makelock(info, pathname):
2136 2136 """Create a lock file atomically if possible
2137 2137
2138 2138 This may leave a stale lock file if symlink isn't supported and signal
2139 2139 interrupt is enabled.
2140 2140 """
2141 2141 try:
2142 2142 return os.symlink(info, pathname)
2143 2143 except OSError as why:
2144 2144 if why.errno == errno.EEXIST:
2145 2145 raise
2146 2146 except AttributeError: # no symlink in os
2147 2147 pass
2148 2148
2149 2149 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2150 2150 ld = os.open(pathname, flags)
2151 2151 os.write(ld, info)
2152 2152 os.close(ld)
2153 2153
2154 2154
2155 2155 def readlock(pathname):
2156 2156 # type: (bytes) -> bytes
2157 2157 try:
2158 2158 return readlink(pathname)
2159 2159 except OSError as why:
2160 2160 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2161 2161 raise
2162 2162 except AttributeError: # no symlink in os
2163 2163 pass
2164 2164 with posixfile(pathname, b'rb') as fp:
2165 2165 return fp.read()
2166 2166
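# A makelock()/readlock() round trip (an illustrative sketch; the path is
# hypothetical). With symlink support the lock content lives in the link
# target; otherwise it is written to a regular file, and readlock() handles
# both representations transparently.
def _lockroundtrip_example(pathname=b'/tmp/demo.lock'):
    makelock(b'pid:12345', pathname)
    return readlock(pathname)  # b'pid:12345'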
2167 2167
2168 2168 def fstat(fp):
2169 2169 '''stat file object that may not have fileno method.'''
2170 2170 try:
2171 2171 return os.fstat(fp.fileno())
2172 2172 except AttributeError:
2173 2173 return os.stat(fp.name)
2174 2174
2175 2175
2176 2176 # File system features
2177 2177
2178 2178
2179 2179 def fscasesensitive(path):
2180 2180 # type: (bytes) -> bool
2181 2181 """
2182 2182 Return true if the given path is on a case-sensitive filesystem
2183 2183
2184 2184 Requires a path (like /foo/.hg) ending with a foldable final
2185 2185 directory component.
2186 2186 """
2187 2187 s1 = os.lstat(path)
2188 2188 d, b = os.path.split(path)
2189 2189 b2 = b.upper()
2190 2190 if b == b2:
2191 2191 b2 = b.lower()
2192 2192 if b == b2:
2193 2193 return True # no evidence against case sensitivity
2194 2194 p2 = os.path.join(d, b2)
2195 2195 try:
2196 2196 s2 = os.lstat(p2)
2197 2197 if s2 == s1:
2198 2198 return False
2199 2199 return True
2200 2200 except OSError:
2201 2201 return True
2202 2202
2203 2203
2204 2204 _re2_input = lambda x: x
2205 2205 try:
2206 2206 import re2 # pytype: disable=import-error
2207 2207
2208 2208 _re2 = None
2209 2209 except ImportError:
2210 2210 _re2 = False
2211 2211
2212 2212
2213 2213 def has_re2():
2214 2214 """return True is re2 is available, False otherwise"""
2215 2215 if _re2 is None:
2216 2216 _re._checkre2()
2217 2217 return _re2
2218 2218
2219 2219
2220 2220 class _re:
2221 2221 @staticmethod
2222 2222 def _checkre2():
2223 2223 global _re2
2224 2224 global _re2_input
2225 2225 if _re2 is not None:
2226 2226 # we already have the answer
2227 2227 return
2228 2228
2229 2229 check_pattern = br'\[([^\[]+)\]'
2230 2230 check_input = b'[ui]'
2231 2231 try:
2232 2232 # check if match works, see issue3964
2233 2233 _re2 = bool(re2.match(check_pattern, check_input))
2234 2234 except ImportError:
2235 2235 _re2 = False
2236 2236 except TypeError:
2237 2237 # the `pyre-2` project provides a re2 module that accepts bytes
2238 2238 # the `fb-re2` project provides a re2 module that accepts sysstr
2239 2239 check_pattern = pycompat.sysstr(check_pattern)
2240 2240 check_input = pycompat.sysstr(check_input)
2241 2241 _re2 = bool(re2.match(check_pattern, check_input))
2242 2242 _re2_input = pycompat.sysstr
2243 2243
2244 2244 def compile(self, pat, flags=0):
2245 2245 """Compile a regular expression, using re2 if possible
2246 2246
2247 2247 For best performance, use only re2-compatible regexp features. The
2248 2248 only flags from the re module that are re2-compatible are
2249 2249 IGNORECASE and MULTILINE."""
2250 2250 if _re2 is None:
2251 2251 self._checkre2()
2252 2252 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2253 2253 if flags & remod.IGNORECASE:
2254 2254 pat = b'(?i)' + pat
2255 2255 if flags & remod.MULTILINE:
2256 2256 pat = b'(?m)' + pat
2257 2257 try:
2258 2258 return re2.compile(_re2_input(pat))
2259 2259 except re2.error:
2260 2260 pass
2261 2261 return remod.compile(pat, flags)
2262 2262
2263 2263 @propertycache
2264 2264 def escape(self):
2265 2265 """Return the version of escape corresponding to self.compile.
2266 2266
2267 2267 This is imperfect because whether re2 or re is used for a particular
2268 2268 function depends on the flags, etc, but it's the best we can do.
2269 2269 """
2270 2270 global _re2
2271 2271 if _re2 is None:
2272 2272 self._checkre2()
2273 2273 if _re2:
2274 2274 return re2.escape
2275 2275 else:
2276 2276 return remod.escape
2277 2277
2278 2278
2279 2279 re = _re()
2280 2280
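# A sketch of the util.re facade (illustrative): compile() transparently uses
# re2 when it is importable and the flags are compatible, and silently falls
# back to the stdlib re module otherwise, so callers need not care.
def _utilre_example():
    pat = re.compile(br'bug(\d+)', remod.IGNORECASE)
    m = pat.match(b'BUG42')
    return m and m.group(1)  # b'42' with either engine

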
2281 2281 _fspathcache = {}
2282 2282
2283 2283
2284 2284 def fspath(name, root):
2285 2285 # type: (bytes, bytes) -> bytes
2286 2286 """Get name in the case stored in the filesystem
2287 2287
2288 2288 The name should be relative to root, and be normcase-ed for efficiency.
2289 2289
2290 2290 Note that this function is unnecessary, and should not be
2291 2291 called, for case-sensitive filesystems (simply because it's expensive).
2292 2292
2293 2293 The root should be normcase-ed, too.
2294 2294 """
2295 2295
2296 2296 def _makefspathcacheentry(dir):
2297 2297 return {normcase(n): n for n in os.listdir(dir)}
2298 2298
2299 2299 seps = pycompat.ossep
2300 2300 if pycompat.osaltsep:
2301 2301 seps = seps + pycompat.osaltsep
2302 2302 # Protect backslashes. This gets silly very quickly.
2303 2303 seps = seps.replace(b'\\', b'\\\\')  # bytes are immutable; keep the result
2304 2304 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2305 2305 dir = os.path.normpath(root)
2306 2306 result = []
2307 2307 for part, sep in pattern.findall(name):
2308 2308 if sep:
2309 2309 result.append(sep)
2310 2310 continue
2311 2311
2312 2312 if dir not in _fspathcache:
2313 2313 _fspathcache[dir] = _makefspathcacheentry(dir)
2314 2314 contents = _fspathcache[dir]
2315 2315
2316 2316 found = contents.get(part)
2317 2317 if not found:
2318 2318 # retry "once per directory" per "dirstate.walk" which
2319 2319 # may take place for each patch of "hg qpush", for example
2320 2320 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2321 2321 found = contents.get(part)
2322 2322
2323 2323 result.append(found or part)
2324 2324 dir = os.path.join(dir, part)
2325 2325
2326 2326 return b''.join(result)
2327 2327
2328 2328
2329 2329 def checknlink(testfile):
2330 2330 # type: (bytes) -> bool
2331 2331 '''check whether hardlink count reporting works properly'''
2332 2332
2333 2333 # testfile may be open, so we need a separate file for checking to
2334 2334 # work around issue2543 (or testfile may get lost on Samba shares)
2335 2335 f1, f2, fp = None, None, None
2336 2336 try:
2337 2337 fd, f1 = pycompat.mkstemp(
2338 2338 prefix=b'.%s-' % os.path.basename(testfile),
2339 2339 suffix=b'1~',
2340 2340 dir=os.path.dirname(testfile),
2341 2341 )
2342 2342 os.close(fd)
2343 2343 f2 = b'%s2~' % f1[:-2]
2344 2344
2345 2345 oslink(f1, f2)
2346 2346 # nlinks() may behave differently for files on Windows shares if
2347 2347 # the file is open.
2348 2348 fp = posixfile(f2)
2349 2349 return nlinks(f2) > 1
2350 2350 except OSError:
2351 2351 return False
2352 2352 finally:
2353 2353 if fp is not None:
2354 2354 fp.close()
2355 2355 for f in (f1, f2):
2356 2356 try:
2357 2357 if f is not None:
2358 2358 os.unlink(f)
2359 2359 except OSError:
2360 2360 pass
2361 2361
2362 2362
2363 2363 def endswithsep(path):
2364 2364 # type: (bytes) -> bool
2365 2365 '''Check path ends with os.sep or os.altsep.'''
2366 2366 return bool( # help pytype
2367 2367 path.endswith(pycompat.ossep)
2368 2368 or pycompat.osaltsep
2369 2369 and path.endswith(pycompat.osaltsep)
2370 2370 )
2371 2371
2372 2372
2373 2373 def splitpath(path):
2374 2374 # type: (bytes) -> List[bytes]
2375 2375 """Split path by os.sep.
2376 2376 Note that this function does not use os.altsep because it is
2377 2377 an alternative to a simple "xxx.split(os.sep)".
2378 2378 It is recommended to use os.path.normpath() before using this
2379 2379 function if needed."""
2380 2380 return path.split(pycompat.ossep)
2381 2381
2382 2382
2383 2383 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2384 2384 """Create a temporary file with the same contents from name
2385 2385
2386 2386 The permission bits are copied from the original file.
2387 2387
2388 2388 If the temporary file is going to be truncated immediately, you
2389 2389 can use emptyok=True as an optimization.
2390 2390
2391 2391 Returns the name of the temporary file.
2392 2392 """
2393 2393 d, fn = os.path.split(name)
2394 2394 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2395 2395 os.close(fd)
2396 2396 # Temporary files are created with mode 0600, which is usually not
2397 2397 # what we want. If the original file already exists, just copy
2398 2398 # its mode. Otherwise, manually obey umask.
2399 2399 copymode(name, temp, createmode, enforcewritable)
2400 2400
2401 2401 if emptyok:
2402 2402 return temp
2403 2403 try:
2404 2404 try:
2405 2405 ifp = posixfile(name, b"rb")
2406 2406 except IOError as inst:
2407 2407 if inst.errno == errno.ENOENT:
2408 2408 return temp
2409 2409 if not getattr(inst, 'filename', None):
2410 2410 inst.filename = name
2411 2411 raise
2412 2412 ofp = posixfile(temp, b"wb")
2413 2413 for chunk in filechunkiter(ifp):
2414 2414 ofp.write(chunk)
2415 2415 ifp.close()
2416 2416 ofp.close()
2417 2417 except: # re-raises
2418 2418 try:
2419 2419 os.unlink(temp)
2420 2420 except OSError:
2421 2421 pass
2422 2422 raise
2423 2423 return temp
2424 2424
2425 2425
2426 2426 class filestat:
2427 2427 """help to exactly detect change of a file
2428 2428
2429 2429 'stat' attribute is result of 'os.stat()' if specified 'path'
2430 2430 exists. Otherwise, it is None. This can avoid preparative
2431 2431 'exists()' examination on client side of this class.
2432 2432 """
2433 2433
2434 2434 def __init__(self, stat):
2435 2435 self.stat = stat
2436 2436
2437 2437 @classmethod
2438 2438 def frompath(cls, path):
2439 2439 try:
2440 2440 stat = os.stat(path)
2441 2441 except FileNotFoundError:
2442 2442 stat = None
2443 2443 return cls(stat)
2444 2444
2445 2445 @classmethod
2446 2446 def fromfp(cls, fp):
2447 2447 stat = os.fstat(fp.fileno())
2448 2448 return cls(stat)
2449 2449
2450 2450 __hash__ = object.__hash__
2451 2451
2452 2452 def __eq__(self, old):
2453 2453 try:
2454 2454 # if ambiguity between stat of new and old file is
2455 2455 # avoided, comparison of size, ctime and mtime is enough
2456 2456 # to exactly detect change of a file regardless of platform
2457 2457 return (
2458 2458 self.stat.st_size == old.stat.st_size
2459 2459 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2460 2460 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2461 2461 )
2462 2462 except AttributeError:
2463 2463 pass
2464 2464 try:
2465 2465 return self.stat is None and old.stat is None
2466 2466 except AttributeError:
2467 2467 return False
2468 2468
2469 2469 def isambig(self, old):
2470 2470 """Examine whether new (= self) stat is ambiguous against old one
2471 2471
2472 2472 "S[N]" below means stat of a file at N-th change:
2473 2473
2474 2474 - S[n-1].ctime < S[n].ctime: can detect change of a file
2475 2475 - S[n-1].ctime == S[n].ctime
2476 2476 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2477 2477 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2478 2478 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2479 2479 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2480 2480
2481 2481 Case (*2) above means that a file was changed twice or more at
2482 2482 same time in sec (= S[n-1].ctime), and comparison of timestamp
2483 2483 is ambiguous.
2484 2484
2485 2485 The basic idea to avoid such ambiguity is "advance mtime by 1 sec,
2486 2486 if the timestamp is ambiguous".
2487 2487
2488 2488 But advancing mtime only in case (*2) doesn't work as
2489 2489 expected, because the naturally advanced S[n].mtime in case (*1)
2490 2490 might be equal to a manually advanced S[n-1 or earlier].mtime.
2491 2491
2492 2492 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2493 2493 treated as ambiguous regardless of mtime, to avoid overlooking a
2494 2494 change hidden by such colliding mtimes.
2495 2495
2496 2496 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2497 2497 S[n].mtime", even if size of a file isn't changed.
2498 2498 """
2499 2499 try:
2500 2500 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2501 2501 except AttributeError:
2502 2502 return False
2503 2503
2504 2504 def avoidambig(self, path, old):
2505 2505 """Change file stat of specified path to avoid ambiguity
2506 2506
2507 2507 'old' should be previous filestat of 'path'.
2508 2508
2509 2509 Avoiding ambiguity is skipped if the process doesn't have
2510 2510 appropriate privileges for 'path'; this returns False in
2511 2511 that case.
2512 2512
2513 2513 Otherwise, this returns True, as "ambiguity is avoided".
2514 2514 """
2515 2515 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2516 2516 try:
2517 2517 os.utime(path, (advanced, advanced))
2518 2518 except PermissionError:
2519 2519 # utime() on the file created by another user causes EPERM,
2520 2520 # if a process doesn't have appropriate privileges
2521 2521 return False
2522 2522 return True
2523 2523
2524 2524 def __ne__(self, other):
2525 2525 return not self == other
2526 2526
2527 2527
2528 2528 class atomictempfile:
2529 2529 """writable file object that atomically updates a file
2530 2530
2531 2531 All writes will go to a temporary copy of the original file. Call
2532 2532 close() when you are done writing, and atomictempfile will rename
2533 2533 the temporary copy to the original name, making the changes
2534 2534 visible. If the object is destroyed without being closed, all your
2535 2535 writes are discarded.
2536 2536
2537 2537 checkambig argument of constructor is used with filestat, and is
2538 2538 useful only if target file is guarded by any lock (e.g. repo.lock
2539 2539 or repo.wlock).
2540 2540 """
2541 2541
2542 2542 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2543 2543 self.__name = name # permanent name
2544 2544 self._tempname = mktempcopy(
2545 2545 name,
2546 2546 emptyok=(b'w' in mode),
2547 2547 createmode=createmode,
2548 2548 enforcewritable=(b'w' in mode),
2549 2549 )
2550 2550
2551 2551 self._fp = posixfile(self._tempname, mode)
2552 2552 self._checkambig = checkambig
2553 2553
2554 2554 # delegated methods
2555 2555 self.read = self._fp.read
2556 2556 self.write = self._fp.write
2557 2557 self.writelines = self._fp.writelines
2558 2558 self.seek = self._fp.seek
2559 2559 self.tell = self._fp.tell
2560 2560 self.fileno = self._fp.fileno
2561 2561
2562 2562 def close(self):
2563 2563 if not self._fp.closed:
2564 2564 self._fp.close()
2565 2565 filename = localpath(self.__name)
2566 2566 oldstat = self._checkambig and filestat.frompath(filename)
2567 2567 if oldstat and oldstat.stat:
2568 2568 rename(self._tempname, filename)
2569 2569 newstat = filestat.frompath(filename)
2570 2570 if newstat.isambig(oldstat):
2571 2571 # stat of changed file is ambiguous to original one
2572 2572 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2573 2573 os.utime(filename, (advanced, advanced))
2574 2574 else:
2575 2575 rename(self._tempname, filename)
2576 2576
2577 2577 def discard(self):
2578 2578 if not self._fp.closed:
2579 2579 try:
2580 2580 os.unlink(self._tempname)
2581 2581 except OSError:
2582 2582 pass
2583 2583 self._fp.close()
2584 2584
2585 2585 def __del__(self):
2586 2586 if safehasattr(self, '_fp'): # constructor actually did something
2587 2587 self.discard()
2588 2588
2589 2589 def __enter__(self):
2590 2590 return self
2591 2591
2592 2592 def __exit__(self, exctype, excvalue, traceback):
2593 2593 if exctype is not None:
2594 2594 self.discard()
2595 2595 else:
2596 2596 self.close()
2597 2597
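# An atomictempfile usage sketch (illustrative; the path is hypothetical):
# used as a context manager, the temporary copy replaces the target only on a
# clean exit and is discarded if the block raises, so readers never see a
# partially written file.
def _atomictempfile_example(path=b'/tmp/demo.txt'):
    with atomictempfile(path, b'wb') as fp:
        fp.write(b'all or nothing\n')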
2598 2598
2599 2599 def tryrmdir(f):
2600 2600 try:
2601 2601 removedirs(f)
2602 2602 except OSError as e:
2603 2603 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2604 2604 raise
2605 2605
2606 2606
2607 2607 def unlinkpath(f, ignoremissing=False, rmdir=True):
2608 2608 # type: (bytes, bool, bool) -> None
2609 2609 """unlink and remove the directory if it is empty"""
2610 2610 if ignoremissing:
2611 2611 tryunlink(f)
2612 2612 else:
2613 2613 unlink(f)
2614 2614 if rmdir:
2615 2615 # try removing directories that might now be empty
2616 2616 try:
2617 2617 removedirs(os.path.dirname(f))
2618 2618 except OSError:
2619 2619 pass
2620 2620
2621 2621
2622 2622 def tryunlink(f):
2623 2623 # type: (bytes) -> None
2624 2624 """Attempt to remove a file, ignoring FileNotFoundError."""
2625 2625 try:
2626 2626 unlink(f)
2627 2627 except FileNotFoundError:
2628 2628 pass
2629 2629
2630 2630
2631 2631 def makedirs(name, mode=None, notindexed=False):
2632 2632 # type: (bytes, Optional[int], bool) -> None
2633 2633 """recursive directory creation with parent mode inheritance
2634 2634
2635 2635 Newly created directories are marked as "not to be indexed by
2636 2636 the content indexing service", if ``notindexed`` is specified
2637 2637 for "write" mode access.
2638 2638 """
2639 2639 try:
2640 2640 makedir(name, notindexed)
2641 2641 except OSError as err:
2642 2642 if err.errno == errno.EEXIST:
2643 2643 return
2644 2644 if err.errno != errno.ENOENT or not name:
2645 2645 raise
2646 2646 parent = os.path.dirname(abspath(name))
2647 2647 if parent == name:
2648 2648 raise
2649 2649 makedirs(parent, mode, notindexed)
2650 2650 try:
2651 2651 makedir(name, notindexed)
2652 2652 except OSError as err:
2653 2653 # Catch EEXIST to handle races
2654 2654 if err.errno == errno.EEXIST:
2655 2655 return
2656 2656 raise
2657 2657 if mode is not None:
2658 2658 os.chmod(name, mode)
2659 2659
2660 2660
2661 2661 def readfile(path):
2662 2662 # type: (bytes) -> bytes
2663 2663 with open(path, b'rb') as fp:
2664 2664 return fp.read()
2665 2665
2666 2666
2667 2667 def writefile(path, text):
2668 2668 # type: (bytes, bytes) -> None
2669 2669 with open(path, b'wb') as fp:
2670 2670 fp.write(text)
2671 2671
2672 2672
2673 2673 def appendfile(path, text):
2674 2674 # type: (bytes, bytes) -> None
2675 2675 with open(path, b'ab') as fp:
2676 2676 fp.write(text)
2677 2677
2678 2678
2679 2679 class chunkbuffer:
2680 2680 """Allow arbitrary sized chunks of data to be efficiently read from an
2681 2681 iterator over chunks of arbitrary size."""
2682 2682
2683 2683 def __init__(self, in_iter):
2684 2684 """in_iter is the iterator that's iterating over the input chunks."""
2685 2685
2686 2686 def splitbig(chunks):
2687 2687 for chunk in chunks:
2688 2688 if len(chunk) > 2 ** 20:
2689 2689 pos = 0
2690 2690 while pos < len(chunk):
2691 2691 end = pos + 2 ** 18
2692 2692 yield chunk[pos:end]
2693 2693 pos = end
2694 2694 else:
2695 2695 yield chunk
2696 2696
2697 2697 self.iter = splitbig(in_iter)
2698 2698 self._queue = collections.deque()
2699 2699 self._chunkoffset = 0
2700 2700
2701 2701 def read(self, l=None):
2702 2702 """Read L bytes of data from the iterator of chunks of data.
2703 2703 Returns less than L bytes if the iterator runs dry.
2704 2704
2705 2705 If the size parameter is omitted, read everything"""
2706 2706 if l is None:
2707 2707 return b''.join(self.iter)
2708 2708
2709 2709 left = l
2710 2710 buf = []
2711 2711 queue = self._queue
2712 2712 while left > 0:
2713 2713 # refill the queue
2714 2714 if not queue:
2715 2715 target = 2 ** 18
2716 2716 for chunk in self.iter:
2717 2717 queue.append(chunk)
2718 2718 target -= len(chunk)
2719 2719 if target <= 0:
2720 2720 break
2721 2721 if not queue:
2722 2722 break
2723 2723
2724 2724 # The easy way to do this would be to queue.popleft(), modify the
2725 2725 # chunk (if necessary), then queue.appendleft(). However, for cases
2726 2726 # where we read partial chunk content, this incurs 2 dequeue
2727 2727 # mutations and creates a new str for the remaining chunk in the
2728 2728 # queue. Our code below avoids this overhead.
2729 2729
2730 2730 chunk = queue[0]
2731 2731 chunkl = len(chunk)
2732 2732 offset = self._chunkoffset
2733 2733
2734 2734 # Use full chunk.
2735 2735 if offset == 0 and left >= chunkl:
2736 2736 left -= chunkl
2737 2737 queue.popleft()
2738 2738 buf.append(chunk)
2739 2739 # self._chunkoffset remains at 0.
2740 2740 continue
2741 2741
2742 2742 chunkremaining = chunkl - offset
2743 2743
2744 2744 # Use all of unconsumed part of chunk.
2745 2745 if left >= chunkremaining:
2746 2746 left -= chunkremaining
2747 2747 queue.popleft()
2748 2748 # The offset == 0 case is handled by the block above, so this
2749 2749 # won't merely copy the whole chunk via ``chunk[0:]``.
2750 2750 buf.append(chunk[offset:])
2751 2751 self._chunkoffset = 0
2752 2752
2753 2753 # Partial chunk needed.
2754 2754 else:
2755 2755 buf.append(chunk[offset : offset + left])
2756 2756 self._chunkoffset += left
2757 2757 left -= chunkremaining  # goes negative, which terminates the loop
2758 2758
2759 2759 return b''.join(buf)
2760 2760
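# A chunkbuffer sketch (illustrative): reads of arbitrary sizes are served
# across chunk boundaries, and a short read signals that the source ran dry.
def _chunkbuffer_example():
    cb = chunkbuffer(iter([b'abc', b'def', b'g']))
    assert cb.read(2) == b'ab'  # stops mid-chunk
    assert cb.read(4) == b'cdef'  # spans a chunk boundary
    assert cb.read(10) == b'g'  # returns less when the data runs out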
2761 2761
2762 2762 def filechunkiter(f, size=131072, limit=None):
2763 2763 """Create a generator that produces the data in the file size
2764 2764 (default 131072) bytes at a time, up to optional limit (default is
2765 2765 to read all data). Chunks may be less than size bytes if the
2766 2766 chunk is the last chunk in the file, or the file is a socket or
2767 2767 some other type of file that sometimes reads less data than is
2768 2768 requested."""
2769 2769 assert size >= 0
2770 2770 assert limit is None or limit >= 0
2771 2771 while True:
2772 2772 if limit is None:
2773 2773 nbytes = size
2774 2774 else:
2775 2775 nbytes = min(limit, size)
2776 2776 s = nbytes and f.read(nbytes)
2777 2777 if not s:
2778 2778 break
2779 2779 if limit:
2780 2780 limit -= len(s)
2781 2781 yield s
2782 2782
2783 2783
2784 2784 class cappedreader:
2785 2785 """A file object proxy that allows reading up to N bytes.
2786 2786
2787 2787 Given a source file object, instances of this type allow reading up to
2788 2788 N bytes from that source file object. Attempts to read past the allowed
2789 2789 limit are treated as EOF.
2790 2790
2791 2791 It is assumed that I/O is not performed on the original file object
2792 2792 in addition to I/O that is performed by this instance. If it is,
2793 2793 state tracking will get out of sync and unexpected results will ensue.
2794 2794 """
2795 2795
2796 2796 def __init__(self, fh, limit):
2797 2797 """Allow reading up to <limit> bytes from <fh>."""
2798 2798 self._fh = fh
2799 2799 self._left = limit
2800 2800
2801 2801 def read(self, n=-1):
2802 2802 if not self._left:
2803 2803 return b''
2804 2804
2805 2805 if n < 0:
2806 2806 n = self._left
2807 2807
2808 2808 data = self._fh.read(min(n, self._left))
2809 2809 self._left -= len(data)
2810 2810 assert self._left >= 0
2811 2811
2812 2812 return data
2813 2813
2814 2814 def readinto(self, b):
2815 2815 res = self.read(len(b))
2816 2816 if res is None:
2817 2817 return None
2818 2818
2819 2819 b[0 : len(res)] = res
2820 2820 return len(res)
2821 2821
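# A cappedreader sketch (illustrative): reads stop at the configured limit
# even though the underlying file object still has data available.
def _cappedreader_example():
    reader = cappedreader(bytesio(b'0123456789'), 4)
    assert reader.read(100) == b'0123'  # capped at the 4 byte limit
    assert reader.read() == b''  # past the limit behaves like EOF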
2822 2822
2823 2823 def unitcountfn(*unittable):
2824 2824 '''return a function that renders a readable count of some quantity'''
2825 2825
2826 2826 def go(count):
2827 2827 for multiplier, divisor, format in unittable:
2828 2828 if abs(count) >= divisor * multiplier:
2829 2829 return format % (count / float(divisor))
2830 2830 return unittable[-1][2] % count
2831 2831
2832 2832 return go
2833 2833
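# A sketch of building a formatter with unitcountfn (illustrative; the table
# mirrors how bytecount below is built). Rows are tried in order, so entries
# with coarser units and larger multipliers must come first.
_examplecount = unitcountfn(
    (1, 1000, b'%.1f k'),
    (1, 1, b'%.0f'),
)
# _examplecount(1500) -> b'1.5 k'; _examplecount(5) -> b'5'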
2834 2834
2835 2835 def processlinerange(fromline, toline):
2836 2836 # type: (int, int) -> Tuple[int, int]
2837 2837 """Check that linerange <fromline>:<toline> makes sense and return a
2838 2838 0-based range.
2839 2839
2840 2840 >>> processlinerange(10, 20)
2841 2841 (9, 20)
2842 2842 >>> processlinerange(2, 1)
2843 2843 Traceback (most recent call last):
2844 2844 ...
2845 2845 ParseError: line range must be positive
2846 2846 >>> processlinerange(0, 5)
2847 2847 Traceback (most recent call last):
2848 2848 ...
2849 2849 ParseError: fromline must be strictly positive
2850 2850 """
2851 2851 if toline - fromline < 0:
2852 2852 raise error.ParseError(_(b"line range must be positive"))
2853 2853 if fromline < 1:
2854 2854 raise error.ParseError(_(b"fromline must be strictly positive"))
2855 2855 return fromline - 1, toline
2856 2856
2857 2857
2858 2858 bytecount = unitcountfn(
2859 2859 (100, 1 << 30, _(b'%.0f GB')),
2860 2860 (10, 1 << 30, _(b'%.1f GB')),
2861 2861 (1, 1 << 30, _(b'%.2f GB')),
2862 2862 (100, 1 << 20, _(b'%.0f MB')),
2863 2863 (10, 1 << 20, _(b'%.1f MB')),
2864 2864 (1, 1 << 20, _(b'%.2f MB')),
2865 2865 (100, 1 << 10, _(b'%.0f KB')),
2866 2866 (10, 1 << 10, _(b'%.1f KB')),
2867 2867 (1, 1 << 10, _(b'%.2f KB')),
2868 2868 (1, 1, _(b'%.0f bytes')),
2869 2869 )
2870 2870
2871 2871
2872 2872 class transformingwriter:
2873 2873 """Writable file wrapper to transform data by function"""
2874 2874
2875 2875 def __init__(self, fp, encode):
2876 2876 self._fp = fp
2877 2877 self._encode = encode
2878 2878
2879 2879 def close(self):
2880 2880 self._fp.close()
2881 2881
2882 2882 def flush(self):
2883 2883 self._fp.flush()
2884 2884
2885 2885 def write(self, data):
2886 2886 return self._fp.write(self._encode(data))
2887 2887
2888 2888
2889 2889 # Matches a single EOL, which can either be a CRLF where repeated CRs
2890 2890 # are removed, or a LF. We do not care about old Macintosh files, so a
2891 2891 # stray CR is an error.
2892 2892 _eolre = remod.compile(br'\r*\n')
2893 2893
2894 2894
2895 2895 def tolf(s):
2896 2896 # type: (bytes) -> bytes
2897 2897 return _eolre.sub(b'\n', s)
2898 2898
2899 2899
2900 2900 def tocrlf(s):
2901 2901 # type: (bytes) -> bytes
2902 2902 return _eolre.sub(b'\r\n', s)
2903 2903
2904 2904
2905 2905 def _crlfwriter(fp):
2906 2906 return transformingwriter(fp, tocrlf)
2907 2907
2908 2908
2909 2909 if pycompat.oslinesep == b'\r\n':
2910 2910 tonativeeol = tocrlf
2911 2911 fromnativeeol = tolf
2912 2912 nativeeolwriter = _crlfwriter
2913 2913 else:
2914 2914 tonativeeol = pycompat.identity
2915 2915 fromnativeeol = pycompat.identity
2916 2916 nativeeolwriter = pycompat.identity
2917 2917
2918 2918
2919 2919 # TODO delete since workaround variant for Python 2 no longer needed.
2920 2920 def iterfile(fp):
2921 2921 return fp
2922 2922
2923 2923
2924 2924 def iterlines(iterator):
2925 2925 # type: (Iterable[bytes]) -> Iterator[bytes]
2926 2926 for chunk in iterator:
2927 2927 for line in chunk.splitlines():
2928 2928 yield line
2929 2929
2930 2930
2931 2931 def expandpath(path):
2932 2932 # type: (bytes) -> bytes
2933 2933 return os.path.expanduser(os.path.expandvars(path))
2934 2934
2935 2935
2936 2936 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2937 2937 """Return the result of interpolating items in the mapping into string s.
2938 2938
2939 2939 prefix is a single character string, or a two character string with
2940 2940 a backslash as the first character if the prefix needs to be escaped in
2941 2941 a regular expression.
2942 2942
2943 2943 fn is an optional function that will be applied to the replacement text
2944 2944 just before replacement.
2945 2945
2946 2946 escape_prefix is an optional flag that allows using doubled prefix for
2947 2947 its escaping.
2948 2948 """
2949 2949 fn = fn or (lambda s: s)
2950 2950 patterns = b'|'.join(mapping.keys())
2951 2951 if escape_prefix:
2952 2952 patterns += b'|' + prefix
2953 2953 if len(prefix) > 1:
2954 2954 prefix_char = prefix[1:]
2955 2955 else:
2956 2956 prefix_char = prefix
2957 2957 mapping[prefix_char] = prefix_char
2958 2958 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2959 2959 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2960 2960
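# An interpolate() sketch (illustrative; the mapping is hypothetical). Note
# the prefix must be regex-safe: b'%' works as-is, while b'$' would have to
# be passed in its escaped two-character form b'\\$'.
def _interpolate_example():
    mapping = {b'user': b'alice', b'repo': b'hg'}
    return interpolate(b'%', mapping, b'%user@%repo')  # b'alice@hg'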
2961 2961
2962 2962 timecount = unitcountfn(
2963 2963 (1, 1e3, _(b'%.0f s')),
2964 2964 (100, 1, _(b'%.1f s')),
2965 2965 (10, 1, _(b'%.2f s')),
2966 2966 (1, 1, _(b'%.3f s')),
2967 2967 (100, 0.001, _(b'%.1f ms')),
2968 2968 (10, 0.001, _(b'%.2f ms')),
2969 2969 (1, 0.001, _(b'%.3f ms')),
2970 2970 (100, 0.000001, _(b'%.1f us')),
2971 2971 (10, 0.000001, _(b'%.2f us')),
2972 2972 (1, 0.000001, _(b'%.3f us')),
2973 2973 (100, 0.000000001, _(b'%.1f ns')),
2974 2974 (10, 0.000000001, _(b'%.2f ns')),
2975 2975 (1, 0.000000001, _(b'%.3f ns')),
2976 2976 )
2977 2977
2978 2978
2979 2979 @attr.s
2980 2980 class timedcmstats:
2981 2981 """Stats information produced by the timedcm context manager on entering."""
2982 2982
2983 2983 # the starting value of the timer as a float (meaning and resolution are
2984 2984 # platform dependent, see util.timer)
2985 2985 start = attr.ib(default=attr.Factory(lambda: timer()))
2986 2986 # the number of seconds as a floating point value; starts at 0, updated when
2987 2987 # the context is exited.
2988 2988 elapsed = attr.ib(default=0)
2989 2989 # the number of nested timedcm context managers.
2990 2990 level = attr.ib(default=1)
2991 2991
2992 2992 def __bytes__(self):
2993 2993 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2994 2994
2995 2995 __str__ = encoding.strmethod(__bytes__)
2996 2996
2997 2997
2998 2998 @contextlib.contextmanager
2999 2999 def timedcm(whencefmt, *whenceargs):
3000 3000 """A context manager that produces timing information for a given context.
3001 3001
3002 3002 On entering a timedcmstats instance is produced.
3003 3003
3004 3004 This context manager is reentrant.
3005 3005
3006 3006 """
3007 3007 # track nested context managers
3008 3008 timedcm._nested += 1
3009 3009 timing_stats = timedcmstats(level=timedcm._nested)
3010 3010 try:
3011 3011 with tracing.log(whencefmt, *whenceargs):
3012 3012 yield timing_stats
3013 3013 finally:
3014 3014 timing_stats.elapsed = timer() - timing_stats.start
3015 3015 timedcm._nested -= 1
3016 3016
3017 3017
3018 3018 timedcm._nested = 0
3019 3019
3020 3020
3021 3021 def timed(func):
3022 3022 """Report the execution time of a function call to stderr.
3023 3023
3024 3024 During development, use as a decorator when you need to measure
3025 3025 the cost of a function, e.g. as follows:
3026 3026
3027 3027 @util.timed
3028 3028 def foo(a, b, c):
3029 3029 pass
3030 3030 """
3031 3031
3032 3032 def wrapper(*args, **kwargs):
3033 3033 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3034 3034 result = func(*args, **kwargs)
3035 3035 stderr = procutil.stderr
3036 3036 stderr.write(
3037 3037 b'%s%s: %s\n'
3038 3038 % (
3039 3039 b' ' * time_stats.level * 2,
3040 3040 pycompat.bytestr(func.__name__),
3041 3041 time_stats,
3042 3042 )
3043 3043 )
3044 3044 return result
3045 3045
3046 3046 return wrapper
3047 3047
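# A timedcm sketch (illustrative): the yielded stats object is filled in when
# the block exits, and nesting is reflected in its ``level`` attribute.
def _timedcm_example():
    with timedcm(b'outer %s', b'step') as outer:
        with timedcm(b'inner') as inner:
            pass
    assert inner.level == outer.level + 1
    return outer.elapsed  # seconds as a float, set on exit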
3048 3048
3049 3049 _sizeunits = (
3050 3050 (b'm', 2 ** 20),
3051 3051 (b'k', 2 ** 10),
3052 3052 (b'g', 2 ** 30),
3053 3053 (b'kb', 2 ** 10),
3054 3054 (b'mb', 2 ** 20),
3055 3055 (b'gb', 2 ** 30),
3056 3056 (b'b', 1),
3057 3057 )
3058 3058
3059 3059
3060 3060 def sizetoint(s):
3061 3061 # type: (bytes) -> int
3062 3062 """Convert a space specifier to a byte count.
3063 3063
3064 3064 >>> sizetoint(b'30')
3065 3065 30
3066 3066 >>> sizetoint(b'2.2kb')
3067 3067 2252
3068 3068 >>> sizetoint(b'6M')
3069 3069 6291456
3070 3070 """
3071 3071 t = s.strip().lower()
3072 3072 try:
3073 3073 for k, u in _sizeunits:
3074 3074 if t.endswith(k):
3075 3075 return int(float(t[: -len(k)]) * u)
3076 3076 return int(t)
3077 3077 except ValueError:
3078 3078 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3079 3079
3080 3080
3081 3081 class hooks:
3082 3082 """A collection of hook functions that can be used to extend a
3083 3083 function's behavior. Hooks are called in lexicographic order,
3084 3084 based on the names of their sources."""
3085 3085
3086 3086 def __init__(self):
3087 3087 self._hooks = []
3088 3088
3089 3089 def add(self, source, hook):
3090 3090 self._hooks.append((source, hook))
3091 3091
3092 3092 def __call__(self, *args):
3093 3093 self._hooks.sort(key=lambda x: x[0])
3094 3094 results = []
3095 3095 for source, hook in self._hooks:
3096 3096 results.append(hook(*args))
3097 3097 return results
3098 3098
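# A hooks sketch (illustrative; the sources and callables are hypothetical):
# hooks run sorted by source name, not in registration order.
def _hooks_example():
    h = hooks()
    h.add(b'zzz', lambda x: x + 1)
    h.add(b'aaa', lambda x: x * 2)
    return h(3)  # [6, 4]: the b'aaa' hook runs before the b'zzz' one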
3099 3099
3100 3100 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3101 3101 """Yields lines for a nicely formatted stacktrace.
3102 3102 Skips the 'skip' last entries, then returns the last 'depth' entries.
3103 3103 Each file+linenumber is formatted according to fileline.
3104 3104 Each line is formatted according to line.
3105 3105 If line is None, it yields:
3106 3106 length of longest filepath+line number,
3107 3107 filepath+linenumber,
3108 3108 function
3109 3109
3110 3110 Not to be used in production code, but very convenient while developing.
3111 3111 """
3112 3112 entries = [
3113 3113 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3114 3114 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3115 3115 ][-depth:]
3116 3116 if entries:
3117 3117 fnmax = max(len(entry[0]) for entry in entries)
3118 3118 for fnln, func in entries:
3119 3119 if line is None:
3120 3120 yield (fnmax, fnln, func)
3121 3121 else:
3122 3122 yield line % (fnmax, fnln, func)
3123 3123
3124 3124
3125 3125 def debugstacktrace(
3126 3126 msg=b'stacktrace',
3127 3127 skip=0,
3128 3128 f=procutil.stderr,
3129 3129 otherf=procutil.stdout,
3130 3130 depth=0,
3131 3131 prefix=b'',
3132 3132 ):
3133 3133 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3134 3134 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3135 3135 By default it will flush stdout first.
3136 3136 It can be used everywhere and intentionally does not require a ui object.
3137 3137 Not to be used in production code, but very convenient while developing.
3138 3138 """
3139 3139 if otherf:
3140 3140 otherf.flush()
3141 3141 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3142 3142 for line in getstackframes(skip + 1, depth=depth):
3143 3143 f.write(prefix + line)
3144 3144 f.flush()
3145 3145
3146 3146
3147 3147 # convenient shortcut
3148 3148 dst = debugstacktrace
3149 3149
3150 3150
3151 3151 def safename(f, tag, ctx, others=None):
3152 3152 """
3153 3153 Generate a name that is safe to rename f to in the given context.
3154 3154
3155 3155 f: filename to rename
3156 3156 tag: a string tag that will be included in the new name
3157 3157 ctx: a context, in which the new name must not exist
3158 3158 others: a set of other filenames that the new name must not be in
3159 3159
3160 3160 Returns a file name of the form oldname~tag[~number] which does not exist
3161 3161 in the provided context and is not in the set of other names.
3162 3162 """
3163 3163 if others is None:
3164 3164 others = set()
3165 3165
3166 3166 fn = b'%s~%s' % (f, tag)
3167 3167 if fn not in ctx and fn not in others:
3168 3168 return fn
3169 3169 for n in itertools.count(1):
3170 3170 fn = b'%s~%s~%s' % (f, tag, n)
3171 3171 if fn not in ctx and fn not in others:
3172 3172 return fn
3173 3173
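# A safename sketch (illustrative; a plain set stands in for the context,
# since only the ``in`` membership test is exercised):
def _safename_example():
    taken = {b'foo~orig', b'foo~orig~1'}
    return safename(b'foo', b'orig', taken)  # b'foo~orig~2'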
3174 3174
3175 3175 def readexactly(stream, n):
3176 3176 '''read n bytes from stream.read and abort if less was available'''
3177 3177 s = stream.read(n)
3178 3178 if len(s) < n:
3179 3179 raise error.Abort(
3180 3180 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3181 3181 % (len(s), n)
3182 3182 )
3183 3183 return s
3184 3184
3185 3185
3186 3186 def uvarintencode(value):
3187 3187 """Encode an unsigned integer value to a varint.
3188 3188
3189 3189 A varint is a variable length integer of 1 or more bytes. Each byte
3190 3190 except the last has the most significant bit set. The lower 7 bits of
3191 3191 each byte store the 2's complement representation, least significant group
3192 3192 first.
3193 3193
3194 3194 >>> uvarintencode(0)
3195 3195 b'\\x00'
3196 3196 >>> uvarintencode(1)
3197 3197 b'\\x01'
3198 3198 >>> uvarintencode(127)
3199 3199 b'\\x7f'
3200 3200 >>> uvarintencode(1337)
3201 3201 b'\\xb9\\n'
3202 3202 >>> uvarintencode(65536)
3203 3203 b'\\x80\\x80\\x04'
3204 3204 >>> uvarintencode(-1)
3205 3205 Traceback (most recent call last):
3206 3206 ...
3207 3207 ProgrammingError: negative value for uvarint: -1
3208 3208 """
3209 3209 if value < 0:
3210 3210 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3211 3211 bits = value & 0x7F
3212 3212 value >>= 7
3213 3213 bytes = []
3214 3214 while value:
3215 3215 bytes.append(pycompat.bytechr(0x80 | bits))
3216 3216 bits = value & 0x7F
3217 3217 value >>= 7
3218 3218 bytes.append(pycompat.bytechr(bits))
3219 3219
3220 3220 return b''.join(bytes)
3221 3221
3222 3222
3223 3223 def uvarintdecodestream(fh):
3224 3224 """Decode an unsigned variable length integer from a stream.
3225 3225
3226 3226 The passed argument is anything that has a ``.read(N)`` method.
3227 3227
3228 3228 >>> from io import BytesIO
3229 3229 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3230 3230 0
3231 3231 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3232 3232 1
3233 3233 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3234 3234 127
3235 3235 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3236 3236 1337
3237 3237 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3238 3238 65536
3239 3239 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3240 3240 Traceback (most recent call last):
3241 3241 ...
3242 3242 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3243 3243 """
3244 3244 result = 0
3245 3245 shift = 0
3246 3246 while True:
3247 3247 byte = ord(readexactly(fh, 1))
3248 3248 result |= (byte & 0x7F) << shift
3249 3249 if not (byte & 0x80):
3250 3250 return result
3251 3251 shift += 7
3252 3252
3253 3253
3254 3254 # Passing the '' locale means that the locale should be set according to the
3255 3255 # user settings (environment variables).
3256 3256 # Python sometimes avoids setting the global locale settings. When interfacing
3257 3257 # with C code (e.g. the curses module or the Subversion bindings), the global
3258 3258 # locale settings must be initialized correctly. Python 2 does not initialize
3259 3259 # the global locale settings on interpreter startup. Python 3 sometimes
3260 3260 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3261 3261 # explicitly initialize it to get consistent behavior if it's not already
3262 3262 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3263 3263 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3264 3264 # if we can remove this code.
3265 3265 @contextlib.contextmanager
3266 3266 def with_lc_ctype():
3267 3267 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3268 3268 if oldloc == 'C':
3269 3269 try:
3270 3270 try:
3271 3271 locale.setlocale(locale.LC_CTYPE, '')
3272 3272 except locale.Error:
3273 3273 # The likely case is that the locale from the environment
3274 3274 # variables is unknown.
3275 3275 pass
3276 3276 yield
3277 3277 finally:
3278 3278 locale.setlocale(locale.LC_CTYPE, oldloc)
3279 3279 else:
3280 3280 yield
3281 3281
3282 3282
3283 3283 def _estimatememory():
3284 3284 # type: () -> Optional[int]
3285 3285 """Provide an estimate for the available system memory in Bytes.
3286 3286
3287 3287 If no estimate can be provided on the platform, returns None.
3288 3288 """
3289 3289 if pycompat.sysplatform.startswith(b'win'):
3290 3290 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3291 3291 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3292 3292 from ctypes import (  # these live in ctypes, not ctypes.wintypes
3293 3293 Structure,
3294 3294 byref,
3295 3295 sizeof,
3296 3296 windll,
3297 3297 )
3298 3298
3299 3299 class MEMORYSTATUSEX(Structure):
3300 3300 _fields_ = [
3301 3301 ('dwLength', DWORD),
3302 3302 ('dwMemoryLoad', DWORD),
3303 3303 ('ullTotalPhys', DWORDLONG),
3304 3304 ('ullAvailPhys', DWORDLONG),
3305 3305 ('ullTotalPageFile', DWORDLONG),
3306 3306 ('ullAvailPageFile', DWORDLONG),
3307 3307 ('ullTotalVirtual', DWORDLONG),
3308 3308 ('ullAvailVirtual', DWORDLONG),
3309 3309 ('ullExtendedVirtual', DWORDLONG),
3310 3310 ]
3311 3311
3312 3312 x = MEMORYSTATUSEX()
3313 3313 x.dwLength = sizeof(x)
3314 3314 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3315 3315 return x.ullAvailPhys
3316 3316
3317 3317 # On newer Unix-like systems and Mac OSX, the sysconf interface
3318 3318 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3319 3319 # seems to be implemented on most systems.
3320 3320 try:
3321 3321 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3322 3322 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3323 3323 return pagesize * pages
3324 3324 except OSError: # sysconf can fail
3325 3325 pass
3326 3326 except KeyError: # unknown parameter
3327 3327 pass