nogc: make the utility work as both a decorator and context manager...
marmoute
r52442:a452807d default
@@ -1,3312 +1,3325 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16
17 17 import abc
18 18 import collections
19 19 import contextlib
20 20 import errno
21 21 import gc
22 22 import hashlib
23 23 import io
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import pickle # provides util.pickle symbol
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from typing import (
38 Any,
38 39 Iterable,
39 40 Iterator,
40 41 List,
41 42 Optional,
42 43 Tuple,
43 44 )
44 45
45 46 from .node import hex
46 47 from .thirdparty import attr
47 48 from .pycompat import (
48 49 open,
49 50 )
50 51 from hgdemandimport import tracing
51 52 from . import (
52 53 encoding,
53 54 error,
54 55 i18n,
55 56 policy,
56 57 pycompat,
57 58 urllibcompat,
58 59 )
59 60 from .utils import (
60 61 compression,
61 62 hashutil,
62 63 procutil,
63 64 stringutil,
64 65 )
65 66
66 67 # keeps pyflakes happy
67 68 assert [
68 69 Iterable,
69 70 Iterator,
70 71 List,
71 72 Optional,
72 73 Tuple,
73 74 ]
74 75
75 76
76 77 base85 = policy.importmod('base85')
77 78 osutil = policy.importmod('osutil')
78 79
79 80 b85decode = base85.b85decode
80 81 b85encode = base85.b85encode
81 82
82 83 cookielib = pycompat.cookielib
83 84 httplib = pycompat.httplib
84 85 safehasattr = pycompat.safehasattr
85 86 socketserver = pycompat.socketserver
86 87 bytesio = io.BytesIO
87 88 # TODO deprecate stringio name, as it is a lie on Python 3.
88 89 stringio = bytesio
89 90 xmlrpclib = pycompat.xmlrpclib
90 91
91 92 httpserver = urllibcompat.httpserver
92 93 urlerr = urllibcompat.urlerr
93 94 urlreq = urllibcompat.urlreq
94 95
95 96 # workaround for win32mbcs
96 97 _filenamebytestr = pycompat.bytestr
97 98
98 99 if pycompat.iswindows:
99 100 from . import windows as platform
100 101 else:
101 102 from . import posix as platform
102 103
103 104 _ = i18n._
104 105
105 106 abspath = platform.abspath
106 107 bindunixsocket = platform.bindunixsocket
107 108 cachestat = platform.cachestat
108 109 checkexec = platform.checkexec
109 110 checklink = platform.checklink
110 111 copymode = platform.copymode
111 112 expandglobs = platform.expandglobs
112 113 getfsmountpoint = platform.getfsmountpoint
113 114 getfstype = platform.getfstype
114 115 get_password = platform.get_password
115 116 groupmembers = platform.groupmembers
116 117 groupname = platform.groupname
117 118 isexec = platform.isexec
118 119 isowner = platform.isowner
119 120 listdir = osutil.listdir
120 121 localpath = platform.localpath
121 122 lookupreg = platform.lookupreg
122 123 makedir = platform.makedir
123 124 nlinks = platform.nlinks
124 125 normpath = platform.normpath
125 126 normcase = platform.normcase
126 127 normcasespec = platform.normcasespec
127 128 normcasefallback = platform.normcasefallback
128 129 openhardlinks = platform.openhardlinks
129 130 oslink = platform.oslink
130 131 parsepatchoutput = platform.parsepatchoutput
131 132 pconvert = platform.pconvert
132 133 poll = platform.poll
133 134 posixfile = platform.posixfile
134 135 readlink = platform.readlink
135 136 rename = platform.rename
136 137 removedirs = platform.removedirs
137 138 samedevice = platform.samedevice
138 139 samefile = platform.samefile
139 140 samestat = platform.samestat
140 141 setflags = platform.setflags
141 142 split = platform.split
142 143 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
143 144 statisexec = platform.statisexec
144 145 statislink = platform.statislink
145 146 umask = platform.umask
146 147 unlink = platform.unlink
147 148 username = platform.username
148 149
149 150
150 151 def setumask(val: int) -> None:
151 152 '''updates the umask. used by chg server'''
152 153 if pycompat.iswindows:
153 154 return
154 155 os.umask(val)
155 156 global umask
156 157 platform.umask = umask = val & 0o777
157 158
158 159
159 160 # small compat layer
160 161 compengines = compression.compengines
161 162 SERVERROLE = compression.SERVERROLE
162 163 CLIENTROLE = compression.CLIENTROLE
163 164
164 165 # Python compatibility
165 166
166 167 _notset = object()
167 168
168 169
169 170 def bitsfrom(container):
170 171 bits = 0
171 172 for bit in container:
172 173 bits |= bit
173 174 return bits
174 175
175 176
176 177 # python 2.6 still has deprecation warnings enabled by default. We do not want
177 178 # to display anything to the standard user, so detect if we are running tests and
178 179 # only use python deprecation warnings in this case.
179 180 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
180 181 if _dowarn:
181 182 # explicitly unfilter our warning for python 2.7
182 183 #
183 184 # The option of setting PYTHONWARNINGS in the test runner was investigated.
184 185 # However, module name set through PYTHONWARNINGS was exactly matched, so
185 186 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
186 187 # makes the whole PYTHONWARNINGS thing useless for our usecase.
187 188 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
188 189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
189 190 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
190 191 if _dowarn:
191 192 # silence warning emitted by passing user string to re.sub()
192 193 warnings.filterwarnings(
193 194 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
194 195 )
195 196 warnings.filterwarnings(
196 197 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
197 198 )
198 199 # TODO: reinvent imp.is_frozen()
199 200 warnings.filterwarnings(
200 201 'ignore',
201 202 'the imp module is deprecated',
202 203 DeprecationWarning,
203 204 'mercurial',
204 205 )
205 206
206 207
207 208 def nouideprecwarn(msg, version, stacklevel=1):
208 209 """Issue an python native deprecation warning
209 210
210 211 This is a noop outside of tests; use 'ui.deprecwarn' when possible.
211 212 """
212 213 if _dowarn:
213 214 msg += (
214 215 b"\n(compatibility will be dropped after Mercurial-%s,"
215 216 b" update your code.)"
216 217 ) % version
217 218 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
218 219 # on python 3 with chg, we will need to explicitly flush the output
219 220 sys.stderr.flush()
220 221
221 222
222 223 DIGESTS = {
223 224 b'md5': hashlib.md5,
224 225 b'sha1': hashutil.sha1,
225 226 b'sha512': hashlib.sha512,
226 227 }
227 228 # List of digest types from strongest to weakest
228 229 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
229 230
230 231 for k in DIGESTS_BY_STRENGTH:
231 232 assert k in DIGESTS
232 233
233 234
234 235 class digester:
235 236 """helper to compute digests.
236 237
237 238 This helper can be used to compute one or more digests given their name.
238 239
239 240 >>> d = digester([b'md5', b'sha1'])
240 241 >>> d.update(b'foo')
241 242 >>> [k for k in sorted(d)]
242 243 ['md5', 'sha1']
243 244 >>> d[b'md5']
244 245 'acbd18db4cc2f85cedef654fccc4a4d8'
245 246 >>> d[b'sha1']
246 247 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
247 248 >>> digester.preferred([b'md5', b'sha1'])
248 249 'sha1'
249 250 """
250 251
251 252 def __init__(self, digests, s=b''):
252 253 self._hashes = {}
253 254 for k in digests:
254 255 if k not in DIGESTS:
255 256 raise error.Abort(_(b'unknown digest type: %s') % k)
256 257 self._hashes[k] = DIGESTS[k]()
257 258 if s:
258 259 self.update(s)
259 260
260 261 def update(self, data):
261 262 for h in self._hashes.values():
262 263 h.update(data)
263 264
264 265 def __getitem__(self, key):
265 266 if key not in DIGESTS:
266 267 raise error.Abort(_(b'unknown digest type: %s') % key)
267 268 return hex(self._hashes[key].digest())
268 269
269 270 def __iter__(self):
270 271 return iter(self._hashes)
271 272
272 273 @staticmethod
273 274 def preferred(supported):
274 275 """returns the strongest digest type in both supported and DIGESTS."""
275 276
276 277 for k in DIGESTS_BY_STRENGTH:
277 278 if k in supported:
278 279 return k
279 280 return None
280 281
281 282
282 283 class digestchecker:
283 284 """file handle wrapper that additionally checks content against a given
284 285 size and digests.
285 286
286 287 d = digestchecker(fh, size, {'md5': '...'})
287 288
288 289 When multiple digests are given, all of them are validated.
289 290 """
290 291
291 292 def __init__(self, fh, size, digests):
292 293 self._fh = fh
293 294 self._size = size
294 295 self._got = 0
295 296 self._digests = dict(digests)
296 297 self._digester = digester(self._digests.keys())
297 298
298 299 def read(self, length=-1):
299 300 content = self._fh.read(length)
300 301 self._digester.update(content)
301 302 self._got += len(content)
302 303 return content
303 304
304 305 def validate(self):
305 306 if self._size != self._got:
306 307 raise error.Abort(
307 308 _(b'size mismatch: expected %d, got %d')
308 309 % (self._size, self._got)
309 310 )
310 311 for k, v in self._digests.items():
311 312 if v != self._digester[k]:
312 313 # i18n: first parameter is a digest name
313 314 raise error.Abort(
314 315 _(b'%s mismatch: expected %s, got %s')
315 316 % (k, v, self._digester[k])
316 317 )
317 318
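# Editorial sketch (not part of the diff): verifying a transfer with
# digestchecker as described above; the path, size, and digest value are
# hypothetical placeholders.
fh = open(b'bundle.hg', 'rb')
checked = digestchecker(fh, size=12345, digests={b'sha1': b'...'})
while checked.read(4096):
    pass
checked.validate()  # raises error.Abort on a size or digest mismatch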
318 319
319 320 try:
320 321 buffer = buffer # pytype: disable=name-error
321 322 except NameError:
322 323
323 324 def buffer(sliceable, offset=0, length=None):
324 325 if length is not None:
325 326 return memoryview(sliceable)[offset : offset + length]
326 327 return memoryview(sliceable)[offset:]
327 328
328 329
329 330 _chunksize = 4096
330 331
331 332
332 333 class bufferedinputpipe:
333 334 """a manually buffered input pipe
334 335
335 336 Python will not let us use buffered IO and lazy reading with 'polling' at
336 337 the same time. We cannot probe the buffer state and select will not detect
337 338 that data are ready to read if they are already buffered.
338 339
339 340 This class lets us work around that by implementing its own buffering
340 341 (allowing efficient readline) while offering a way to know if the buffer is
341 342 empty from the output (allowing collaboration of the buffer with polling).
342 343
343 344 This class lives in the 'util' module because it makes use of the 'os'
344 345 module from the python stdlib.
345 346 """
346 347
347 348 def __new__(cls, fh):
348 349 # If we receive a fileobjectproxy, we need to use a variation of this
349 350 # class that notifies observers about activity.
350 351 if isinstance(fh, fileobjectproxy):
351 352 cls = observedbufferedinputpipe
352 353
353 354 return super(bufferedinputpipe, cls).__new__(cls)
354 355
355 356 def __init__(self, input):
356 357 self._input = input
357 358 self._buffer = []
358 359 self._eof = False
359 360 self._lenbuf = 0
360 361
361 362 @property
362 363 def hasbuffer(self):
363 364 """True is any data is currently buffered
364 365
365 366 This will be used externally as a pre-step for polling IO. If there is
366 367 already data then no polling should be set in place."""
367 368 return bool(self._buffer)
368 369
369 370 @property
370 371 def closed(self):
371 372 return self._input.closed
372 373
373 374 def fileno(self):
374 375 return self._input.fileno()
375 376
376 377 def close(self):
377 378 return self._input.close()
378 379
379 380 def read(self, size):
380 381 while (not self._eof) and (self._lenbuf < size):
381 382 self._fillbuffer()
382 383 return self._frombuffer(size)
383 384
384 385 def unbufferedread(self, size):
385 386 if not self._eof and self._lenbuf == 0:
386 387 self._fillbuffer(max(size, _chunksize))
387 388 return self._frombuffer(min(self._lenbuf, size))
388 389
389 390 def readline(self, *args, **kwargs):
390 391 if len(self._buffer) > 1:
391 392 # this should not happen because both read and readline end with a
392 393 # _frombuffer call that collapses it.
393 394 self._buffer = [b''.join(self._buffer)]
394 395 self._lenbuf = len(self._buffer[0])
395 396 lfi = -1
396 397 if self._buffer:
397 398 lfi = self._buffer[-1].find(b'\n')
398 399 while (not self._eof) and lfi < 0:
399 400 self._fillbuffer()
400 401 if self._buffer:
401 402 lfi = self._buffer[-1].find(b'\n')
402 403 size = lfi + 1
403 404 if lfi < 0: # end of file
404 405 size = self._lenbuf
405 406 elif len(self._buffer) > 1:
406 407 # we need to take previous chunks into account
407 408 size += self._lenbuf - len(self._buffer[-1])
408 409 return self._frombuffer(size)
409 410
410 411 def _frombuffer(self, size):
411 412 """return at most 'size' data from the buffer
412 413
413 414 The data are removed from the buffer."""
414 415 if size == 0 or not self._buffer:
415 416 return b''
416 417 buf = self._buffer[0]
417 418 if len(self._buffer) > 1:
418 419 buf = b''.join(self._buffer)
419 420
420 421 data = buf[:size]
421 422 buf = buf[len(data) :]
422 423 if buf:
423 424 self._buffer = [buf]
424 425 self._lenbuf = len(buf)
425 426 else:
426 427 self._buffer = []
427 428 self._lenbuf = 0
428 429 return data
429 430
430 431 def _fillbuffer(self, size=_chunksize):
431 432 """read data to the buffer"""
432 433 data = os.read(self._input.fileno(), size)
433 434 if not data:
434 435 self._eof = True
435 436 else:
436 437 self._lenbuf += len(data)
437 438 self._buffer.append(data)
438 439
439 440 return data
440 441
441 442
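# Editorial sketch (not part of the diff): the buffer/poll collaboration the
# docstring above describes; proc is a hypothetical subprocess whose stdout
# is a pipe.
pipe = bufferedinputpipe(proc.stdout)
while not pipe.closed:
    if not pipe.hasbuffer:
        poll([pipe.fileno()])  # only block when nothing is already buffered
    line = pipe.readline()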
442 443 def mmapread(fp, size=None):
443 444 if size == 0:
444 445 # size of 0 to mmap.mmap() means "all data"
445 446 # rather than "zero bytes", so special case that.
446 447 return b''
447 448 elif size is None:
448 449 size = 0
449 450 fd = getattr(fp, 'fileno', lambda: fp)()
450 451 try:
451 452 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
452 453 except ValueError:
453 454 # Empty files cannot be mmapped, but mmapread should still work. Check
454 455 # if the file is empty, and if so, return an empty buffer.
455 456 if os.fstat(fd).st_size == 0:
456 457 return b''
457 458 raise
458 459
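# Editorial sketch (not part of the diff): mmapread maps the whole file
# read-only; per the ValueError handler above, an empty file yields b''
# instead of raising. The path is hypothetical.
with open(b'somefile', 'rb') as fp:
    data = mmapread(fp)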
459 460
460 461 class fileobjectproxy:
461 462 """A proxy around file objects that tells a watcher when events occur.
462 463
463 464 This type is intended to only be used for testing purposes. Think hard
464 465 before using it in important code.
465 466 """
466 467
467 468 __slots__ = (
468 469 '_orig',
469 470 '_observer',
470 471 )
471 472
472 473 def __init__(self, fh, observer):
473 474 object.__setattr__(self, '_orig', fh)
474 475 object.__setattr__(self, '_observer', observer)
475 476
476 477 def __getattribute__(self, name):
477 478 ours = {
478 479 '_observer',
479 480 # IOBase
480 481 'close',
481 482 # closed is a property
482 483 'fileno',
483 484 'flush',
484 485 'isatty',
485 486 'readable',
486 487 'readline',
487 488 'readlines',
488 489 'seek',
489 490 'seekable',
490 491 'tell',
491 492 'truncate',
492 493 'writable',
493 494 'writelines',
494 495 # RawIOBase
495 496 'read',
496 497 'readall',
497 498 'readinto',
498 499 'write',
499 500 # BufferedIOBase
500 501 # raw is a property
501 502 'detach',
502 503 # read defined above
503 504 'read1',
504 505 # readinto defined above
505 506 # write defined above
506 507 }
507 508
508 509 # We only observe some methods.
509 510 if name in ours:
510 511 return object.__getattribute__(self, name)
511 512
512 513 return getattr(object.__getattribute__(self, '_orig'), name)
513 514
514 515 def __nonzero__(self):
515 516 return bool(object.__getattribute__(self, '_orig'))
516 517
517 518 __bool__ = __nonzero__
518 519
519 520 def __delattr__(self, name):
520 521 return delattr(object.__getattribute__(self, '_orig'), name)
521 522
522 523 def __setattr__(self, name, value):
523 524 return setattr(object.__getattribute__(self, '_orig'), name, value)
524 525
525 526 def __iter__(self):
526 527 return object.__getattribute__(self, '_orig').__iter__()
527 528
528 529 def _observedcall(self, name, *args, **kwargs):
529 530 # Call the original object.
530 531 orig = object.__getattribute__(self, '_orig')
531 532 res = getattr(orig, name)(*args, **kwargs)
532 533
533 534 # Call a method on the observer of the same name with arguments
534 535 # so it can react, log, etc.
535 536 observer = object.__getattribute__(self, '_observer')
536 537 fn = getattr(observer, name, None)
537 538 if fn:
538 539 fn(res, *args, **kwargs)
539 540
540 541 return res
541 542
542 543 def close(self, *args, **kwargs):
543 544 return object.__getattribute__(self, '_observedcall')(
544 545 'close', *args, **kwargs
545 546 )
546 547
547 548 def fileno(self, *args, **kwargs):
548 549 return object.__getattribute__(self, '_observedcall')(
549 550 'fileno', *args, **kwargs
550 551 )
551 552
552 553 def flush(self, *args, **kwargs):
553 554 return object.__getattribute__(self, '_observedcall')(
554 555 'flush', *args, **kwargs
555 556 )
556 557
557 558 def isatty(self, *args, **kwargs):
558 559 return object.__getattribute__(self, '_observedcall')(
559 560 'isatty', *args, **kwargs
560 561 )
561 562
562 563 def readable(self, *args, **kwargs):
563 564 return object.__getattribute__(self, '_observedcall')(
564 565 'readable', *args, **kwargs
565 566 )
566 567
567 568 def readline(self, *args, **kwargs):
568 569 return object.__getattribute__(self, '_observedcall')(
569 570 'readline', *args, **kwargs
570 571 )
571 572
572 573 def readlines(self, *args, **kwargs):
573 574 return object.__getattribute__(self, '_observedcall')(
574 575 'readlines', *args, **kwargs
575 576 )
576 577
577 578 def seek(self, *args, **kwargs):
578 579 return object.__getattribute__(self, '_observedcall')(
579 580 'seek', *args, **kwargs
580 581 )
581 582
582 583 def seekable(self, *args, **kwargs):
583 584 return object.__getattribute__(self, '_observedcall')(
584 585 'seekable', *args, **kwargs
585 586 )
586 587
587 588 def tell(self, *args, **kwargs):
588 589 return object.__getattribute__(self, '_observedcall')(
589 590 'tell', *args, **kwargs
590 591 )
591 592
592 593 def truncate(self, *args, **kwargs):
593 594 return object.__getattribute__(self, '_observedcall')(
594 595 'truncate', *args, **kwargs
595 596 )
596 597
597 598 def writable(self, *args, **kwargs):
598 599 return object.__getattribute__(self, '_observedcall')(
599 600 'writable', *args, **kwargs
600 601 )
601 602
602 603 def writelines(self, *args, **kwargs):
603 604 return object.__getattribute__(self, '_observedcall')(
604 605 'writelines', *args, **kwargs
605 606 )
606 607
607 608 def read(self, *args, **kwargs):
608 609 return object.__getattribute__(self, '_observedcall')(
609 610 'read', *args, **kwargs
610 611 )
611 612
612 613 def readall(self, *args, **kwargs):
613 614 return object.__getattribute__(self, '_observedcall')(
614 615 'readall', *args, **kwargs
615 616 )
616 617
617 618 def readinto(self, *args, **kwargs):
618 619 return object.__getattribute__(self, '_observedcall')(
619 620 'readinto', *args, **kwargs
620 621 )
621 622
622 623 def write(self, *args, **kwargs):
623 624 return object.__getattribute__(self, '_observedcall')(
624 625 'write', *args, **kwargs
625 626 )
626 627
627 628 def detach(self, *args, **kwargs):
628 629 return object.__getattribute__(self, '_observedcall')(
629 630 'detach', *args, **kwargs
630 631 )
631 632
632 633 def read1(self, *args, **kwargs):
633 634 return object.__getattribute__(self, '_observedcall')(
634 635 'read1', *args, **kwargs
635 636 )
636 637
637 638
638 639 class observedbufferedinputpipe(bufferedinputpipe):
639 640 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
640 641
641 642 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
642 643 bypass ``fileobjectproxy``. Because of this, we need to make
643 644 ``bufferedinputpipe`` aware of these operations.
644 645
645 646 This variation of ``bufferedinputpipe`` can notify observers about
646 647 ``os.read()`` events. It also re-publishes other events, such as
647 648 ``read()`` and ``readline()``.
648 649 """
649 650
650 651 def _fillbuffer(self, size=_chunksize):
651 652 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
652 653
653 654 fn = getattr(self._input._observer, 'osread', None)
654 655 if fn:
655 656 fn(res, size)
656 657
657 658 return res
658 659
659 660 # We use different observer methods because the operation isn't
660 661 # performed on the actual file object but on us.
661 662 def read(self, size):
662 663 res = super(observedbufferedinputpipe, self).read(size)
663 664
664 665 fn = getattr(self._input._observer, 'bufferedread', None)
665 666 if fn:
666 667 fn(res, size)
667 668
668 669 return res
669 670
670 671 def readline(self, *args, **kwargs):
671 672 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
672 673
673 674 fn = getattr(self._input._observer, 'bufferedreadline', None)
674 675 if fn:
675 676 fn(res)
676 677
677 678 return res
678 679
679 680
680 681 PROXIED_SOCKET_METHODS = {
681 682 'makefile',
682 683 'recv',
683 684 'recvfrom',
684 685 'recvfrom_into',
685 686 'recv_into',
686 687 'send',
687 688 'sendall',
688 689 'sendto',
689 690 'setblocking',
690 691 'settimeout',
691 692 'gettimeout',
692 693 'setsockopt',
693 694 }
694 695
695 696
696 697 class socketproxy:
697 698 """A proxy around a socket that tells a watcher when events occur.
698 699
699 700 This is like ``fileobjectproxy`` except for sockets.
700 701
701 702 This type is intended to only be used for testing purposes. Think hard
702 703 before using it in important code.
703 704 """
704 705
705 706 __slots__ = (
706 707 '_orig',
707 708 '_observer',
708 709 )
709 710
710 711 def __init__(self, sock, observer):
711 712 object.__setattr__(self, '_orig', sock)
712 713 object.__setattr__(self, '_observer', observer)
713 714
714 715 def __getattribute__(self, name):
715 716 if name in PROXIED_SOCKET_METHODS:
716 717 return object.__getattribute__(self, name)
717 718
718 719 return getattr(object.__getattribute__(self, '_orig'), name)
719 720
720 721 def __delattr__(self, name):
721 722 return delattr(object.__getattribute__(self, '_orig'), name)
722 723
723 724 def __setattr__(self, name, value):
724 725 return setattr(object.__getattribute__(self, '_orig'), name, value)
725 726
726 727 def __nonzero__(self):
727 728 return bool(object.__getattribute__(self, '_orig'))
728 729
729 730 __bool__ = __nonzero__
730 731
731 732 def _observedcall(self, name, *args, **kwargs):
732 733 # Call the original object.
733 734 orig = object.__getattribute__(self, '_orig')
734 735 res = getattr(orig, name)(*args, **kwargs)
735 736
736 737 # Call a method on the observer of the same name with arguments
737 738 # so it can react, log, etc.
738 739 observer = object.__getattribute__(self, '_observer')
739 740 fn = getattr(observer, name, None)
740 741 if fn:
741 742 fn(res, *args, **kwargs)
742 743
743 744 return res
744 745
745 746 def makefile(self, *args, **kwargs):
746 747 res = object.__getattribute__(self, '_observedcall')(
747 748 'makefile', *args, **kwargs
748 749 )
749 750
750 751 # The file object may be used for I/O. So we turn it into a
751 752 # proxy using our observer.
752 753 observer = object.__getattribute__(self, '_observer')
753 754 return makeloggingfileobject(
754 755 observer.fh,
755 756 res,
756 757 observer.name,
757 758 reads=observer.reads,
758 759 writes=observer.writes,
759 760 logdata=observer.logdata,
760 761 logdataapis=observer.logdataapis,
761 762 )
762 763
763 764 def recv(self, *args, **kwargs):
764 765 return object.__getattribute__(self, '_observedcall')(
765 766 'recv', *args, **kwargs
766 767 )
767 768
768 769 def recvfrom(self, *args, **kwargs):
769 770 return object.__getattribute__(self, '_observedcall')(
770 771 'recvfrom', *args, **kwargs
771 772 )
772 773
773 774 def recvfrom_into(self, *args, **kwargs):
774 775 return object.__getattribute__(self, '_observedcall')(
775 776 'recvfrom_into', *args, **kwargs
776 777 )
777 778
778 779 def recv_into(self, *args, **kwargs):
779 780 return object.__getattribute__(self, '_observedcall')(
780 781 'recv_into', *args, **kwargs
781 782 )
782 783
783 784 def send(self, *args, **kwargs):
784 785 return object.__getattribute__(self, '_observedcall')(
785 786 'send', *args, **kwargs
786 787 )
787 788
788 789 def sendall(self, *args, **kwargs):
789 790 return object.__getattribute__(self, '_observedcall')(
790 791 'sendall', *args, **kwargs
791 792 )
792 793
793 794 def sendto(self, *args, **kwargs):
794 795 return object.__getattribute__(self, '_observedcall')(
795 796 'sendto', *args, **kwargs
796 797 )
797 798
798 799 def setblocking(self, *args, **kwargs):
799 800 return object.__getattribute__(self, '_observedcall')(
800 801 'setblocking', *args, **kwargs
801 802 )
802 803
803 804 def settimeout(self, *args, **kwargs):
804 805 return object.__getattribute__(self, '_observedcall')(
805 806 'settimeout', *args, **kwargs
806 807 )
807 808
808 809 def gettimeout(self, *args, **kwargs):
809 810 return object.__getattribute__(self, '_observedcall')(
810 811 'gettimeout', *args, **kwargs
811 812 )
812 813
813 814 def setsockopt(self, *args, **kwargs):
814 815 return object.__getattribute__(self, '_observedcall')(
815 816 'setsockopt', *args, **kwargs
816 817 )
817 818
818 819
819 820 class baseproxyobserver:
820 821 def __init__(self, fh, name, logdata, logdataapis):
821 822 self.fh = fh
822 823 self.name = name
823 824 self.logdata = logdata
824 825 self.logdataapis = logdataapis
825 826
826 827 def _writedata(self, data):
827 828 if not self.logdata:
828 829 if self.logdataapis:
829 830 self.fh.write(b'\n')
830 831 self.fh.flush()
831 832 return
832 833
833 834 # Simple case writes all data on a single line.
834 835 if b'\n' not in data:
835 836 if self.logdataapis:
836 837 self.fh.write(b': %s\n' % stringutil.escapestr(data))
837 838 else:
838 839 self.fh.write(
839 840 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
840 841 )
841 842 self.fh.flush()
842 843 return
843 844
844 845 # Data with newlines is written to multiple lines.
845 846 if self.logdataapis:
846 847 self.fh.write(b':\n')
847 848
848 849 lines = data.splitlines(True)
849 850 for line in lines:
850 851 self.fh.write(
851 852 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
852 853 )
853 854 self.fh.flush()
854 855
855 856
856 857 class fileobjectobserver(baseproxyobserver):
857 858 """Logs file object activity."""
858 859
859 860 def __init__(
860 861 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
861 862 ):
862 863 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
863 864 self.reads = reads
864 865 self.writes = writes
865 866
866 867 def read(self, res, size=-1):
867 868 if not self.reads:
868 869 return
869 870 # Python 3 can return None from reads at EOF instead of empty strings.
870 871 if res is None:
871 872 res = b''
872 873
873 874 if size == -1 and res == b'':
874 875 # Suppress pointless read(-1) calls that return
875 876 # nothing. These happen _a lot_ on Python 3, and there
876 877 # doesn't seem to be a better workaround to have matching
877 878 # Python 2 and 3 behavior. :(
878 879 return
879 880
880 881 if self.logdataapis:
881 882 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
882 883
883 884 self._writedata(res)
884 885
885 886 def readline(self, res, limit=-1):
886 887 if not self.reads:
887 888 return
888 889
889 890 if self.logdataapis:
890 891 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
891 892
892 893 self._writedata(res)
893 894
894 895 def readinto(self, res, dest):
895 896 if not self.reads:
896 897 return
897 898
898 899 if self.logdataapis:
899 900 self.fh.write(
900 901 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
901 902 )
902 903
903 904 data = dest[0:res] if res is not None else b''
904 905
905 906 # _writedata() uses "in" operator and is confused by memoryview because
906 907 # characters are ints on Python 3.
907 908 if isinstance(data, memoryview):
908 909 data = data.tobytes()
909 910
910 911 self._writedata(data)
911 912
912 913 def write(self, res, data):
913 914 if not self.writes:
914 915 return
915 916
916 917 # Python 2 returns None from some write() calls. Python 3 (reasonably)
917 918 # returns the integer bytes written.
918 919 if res is None and data:
919 920 res = len(data)
920 921
921 922 if self.logdataapis:
922 923 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
923 924
924 925 self._writedata(data)
925 926
926 927 def flush(self, res):
927 928 if not self.writes:
928 929 return
929 930
930 931 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
931 932
932 933 # For observedbufferedinputpipe.
933 934 def bufferedread(self, res, size):
934 935 if not self.reads:
935 936 return
936 937
937 938 if self.logdataapis:
938 939 self.fh.write(
939 940 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
940 941 )
941 942
942 943 self._writedata(res)
943 944
944 945 def bufferedreadline(self, res):
945 946 if not self.reads:
946 947 return
947 948
948 949 if self.logdataapis:
949 950 self.fh.write(
950 951 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
951 952 )
952 953
953 954 self._writedata(res)
954 955
955 956
956 957 def makeloggingfileobject(
957 958 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
958 959 ):
959 960 """Turn a file object into a logging file object."""
960 961
961 962 observer = fileobjectobserver(
962 963 logh,
963 964 name,
964 965 reads=reads,
965 966 writes=writes,
966 967 logdata=logdata,
967 968 logdataapis=logdataapis,
968 969 )
969 970 return fileobjectproxy(fh, observer)
970 971
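# Editorial sketch (not part of the diff): wrapping a file handle so its I/O
# is logged; the log destination and names are hypothetical (sys is already
# imported at the top of this module).
fh = open(b'payload.bin', 'rb')
logged = makeloggingfileobject(sys.stderr.buffer, fh, b'payload', logdata=True)
logged.read(16)  # logs e.g. "payload> read(16) -> 16: <escaped data>"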
971 972
972 973 class socketobserver(baseproxyobserver):
973 974 """Logs socket activity."""
974 975
975 976 def __init__(
976 977 self,
977 978 fh,
978 979 name,
979 980 reads=True,
980 981 writes=True,
981 982 states=True,
982 983 logdata=False,
983 984 logdataapis=True,
984 985 ):
985 986 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
986 987 self.reads = reads
987 988 self.writes = writes
988 989 self.states = states
989 990
990 991 def makefile(self, res, mode=None, bufsize=None):
991 992 if not self.states:
992 993 return
993 994
994 995 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
995 996
996 997 def recv(self, res, size, flags=0):
997 998 if not self.reads:
998 999 return
999 1000
1000 1001 if self.logdataapis:
1001 1002 self.fh.write(
1002 1003 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1003 1004 )
1004 1005 self._writedata(res)
1005 1006
1006 1007 def recvfrom(self, res, size, flags=0):
1007 1008 if not self.reads:
1008 1009 return
1009 1010
1010 1011 if self.logdataapis:
1011 1012 self.fh.write(
1012 1013 b'%s> recvfrom(%d, %d) -> %d'
1013 1014 % (self.name, size, flags, len(res[0]))
1014 1015 )
1015 1016
1016 1017 self._writedata(res[0])
1017 1018
1018 1019 def recvfrom_into(self, res, buf, size, flags=0):
1019 1020 if not self.reads:
1020 1021 return
1021 1022
1022 1023 if self.logdataapis:
1023 1024 self.fh.write(
1024 1025 b'%s> recvfrom_into(%d, %d) -> %d'
1025 1026 % (self.name, size, flags, res[0])
1026 1027 )
1027 1028
1028 1029 self._writedata(buf[0 : res[0]])
1029 1030
1030 1031 def recv_into(self, res, buf, size=0, flags=0):
1031 1032 if not self.reads:
1032 1033 return
1033 1034
1034 1035 if self.logdataapis:
1035 1036 self.fh.write(
1036 1037 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1037 1038 )
1038 1039
1039 1040 self._writedata(buf[0:res])
1040 1041
1041 1042 def send(self, res, data, flags=0):
1042 1043 if not self.writes:
1043 1044 return
1044 1045
1045 1046 self.fh.write(
1046 1047 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1047 1048 )
1048 1049 self._writedata(data)
1049 1050
1050 1051 def sendall(self, res, data, flags=0):
1051 1052 if not self.writes:
1052 1053 return
1053 1054
1054 1055 if self.logdataapis:
1055 1056 # Returns None on success. So don't bother reporting return value.
1056 1057 self.fh.write(
1057 1058 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1058 1059 )
1059 1060
1060 1061 self._writedata(data)
1061 1062
1062 1063 def sendto(self, res, data, flagsoraddress, address=None):
1063 1064 if not self.writes:
1064 1065 return
1065 1066
1066 1067 if address:
1067 1068 flags = flagsoraddress
1068 1069 else:
1069 1070 flags = 0
1070 1071
1071 1072 if self.logdataapis:
1072 1073 self.fh.write(
1073 1074 b'%s> sendto(%d, %d, %r) -> %d'
1074 1075 % (self.name, len(data), flags, address, res)
1075 1076 )
1076 1077
1077 1078 self._writedata(data)
1078 1079
1079 1080 def setblocking(self, res, flag):
1080 1081 if not self.states:
1081 1082 return
1082 1083
1083 1084 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1084 1085
1085 1086 def settimeout(self, res, value):
1086 1087 if not self.states:
1087 1088 return
1088 1089
1089 1090 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1090 1091
1091 1092 def gettimeout(self, res):
1092 1093 if not self.states:
1093 1094 return
1094 1095
1095 1096 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1096 1097
1097 1098 def setsockopt(self, res, level, optname, value):
1098 1099 if not self.states:
1099 1100 return
1100 1101
1101 1102 self.fh.write(
1102 1103 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1103 1104 % (self.name, level, optname, value, res)
1104 1105 )
1105 1106
1106 1107
1107 1108 def makeloggingsocket(
1108 1109 logh,
1109 1110 fh,
1110 1111 name,
1111 1112 reads=True,
1112 1113 writes=True,
1113 1114 states=True,
1114 1115 logdata=False,
1115 1116 logdataapis=True,
1116 1117 ):
1117 1118 """Turn a socket into a logging socket."""
1118 1119
1119 1120 observer = socketobserver(
1120 1121 logh,
1121 1122 name,
1122 1123 reads=reads,
1123 1124 writes=writes,
1124 1125 states=states,
1125 1126 logdata=logdata,
1126 1127 logdataapis=logdataapis,
1127 1128 )
1128 1129 return socketproxy(fh, observer)
1129 1130
1130 1131
1131 1132 def version():
1132 1133 """Return version information if available."""
1133 1134 try:
1134 1135 from . import __version__
1135 1136
1136 1137 return __version__.version
1137 1138 except ImportError:
1138 1139 return b'unknown'
1139 1140
1140 1141
1141 1142 def versiontuple(v=None, n=4):
1142 1143 """Parses a Mercurial version string into an N-tuple.
1143 1144
1144 1145 The version string to be parsed is specified with the ``v`` argument.
1145 1146 If it isn't defined, the current Mercurial version string will be parsed.
1146 1147
1147 1148 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1148 1149 returned values:
1149 1150
1150 1151 >>> v = b'3.6.1+190-df9b73d2d444'
1151 1152 >>> versiontuple(v, 2)
1152 1153 (3, 6)
1153 1154 >>> versiontuple(v, 3)
1154 1155 (3, 6, 1)
1155 1156 >>> versiontuple(v, 4)
1156 1157 (3, 6, 1, '190-df9b73d2d444')
1157 1158
1158 1159 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1159 1160 (3, 6, 1, '190-df9b73d2d444+20151118')
1160 1161
1161 1162 >>> v = b'3.6'
1162 1163 >>> versiontuple(v, 2)
1163 1164 (3, 6)
1164 1165 >>> versiontuple(v, 3)
1165 1166 (3, 6, None)
1166 1167 >>> versiontuple(v, 4)
1167 1168 (3, 6, None, None)
1168 1169
1169 1170 >>> v = b'3.9-rc'
1170 1171 >>> versiontuple(v, 2)
1171 1172 (3, 9)
1172 1173 >>> versiontuple(v, 3)
1173 1174 (3, 9, None)
1174 1175 >>> versiontuple(v, 4)
1175 1176 (3, 9, None, 'rc')
1176 1177
1177 1178 >>> v = b'3.9-rc+2-02a8fea4289b'
1178 1179 >>> versiontuple(v, 2)
1179 1180 (3, 9)
1180 1181 >>> versiontuple(v, 3)
1181 1182 (3, 9, None)
1182 1183 >>> versiontuple(v, 4)
1183 1184 (3, 9, None, 'rc+2-02a8fea4289b')
1184 1185
1185 1186 >>> versiontuple(b'4.6rc0')
1186 1187 (4, 6, None, 'rc0')
1187 1188 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1188 1189 (4, 6, None, 'rc0+12-425d55e54f98')
1189 1190 >>> versiontuple(b'.1.2.3')
1190 1191 (None, None, None, '.1.2.3')
1191 1192 >>> versiontuple(b'12.34..5')
1192 1193 (12, 34, None, '..5')
1193 1194 >>> versiontuple(b'1.2.3.4.5.6')
1194 1195 (1, 2, 3, '.4.5.6')
1195 1196 """
1196 1197 if not v:
1197 1198 v = version()
1198 1199 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1199 1200 if not m:
1200 1201 vparts, extra = b'', v
1201 1202 elif m.group(2):
1202 1203 vparts, extra = m.groups()
1203 1204 else:
1204 1205 vparts, extra = m.group(1), None
1205 1206
1206 1207 assert vparts is not None # help pytype
1207 1208
1208 1209 vints = []
1209 1210 for i in vparts.split(b'.'):
1210 1211 try:
1211 1212 vints.append(int(i))
1212 1213 except ValueError:
1213 1214 break
1214 1215 # (3, 6) -> (3, 6, None)
1215 1216 while len(vints) < 3:
1216 1217 vints.append(None)
1217 1218
1218 1219 if n == 2:
1219 1220 return (vints[0], vints[1])
1220 1221 if n == 3:
1221 1222 return (vints[0], vints[1], vints[2])
1222 1223 if n == 4:
1223 1224 return (vints[0], vints[1], vints[2], extra)
1224 1225
1225 1226 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1226 1227
1227 1228
1228 1229 def cachefunc(func):
1229 1230 '''cache the result of function calls'''
1230 1231 # XXX doesn't handle keyword args
1231 1232 if func.__code__.co_argcount == 0:
1232 1233 listcache = []
1233 1234
1234 1235 def f():
1235 1236 if len(listcache) == 0:
1236 1237 listcache.append(func())
1237 1238 return listcache[0]
1238 1239
1239 1240 return f
1240 1241 cache = {}
1241 1242 if func.__code__.co_argcount == 1:
1242 1243 # we gain a small amount of time because
1243 1244 # we don't need to pack/unpack the list
1244 1245 def f(arg):
1245 1246 if arg not in cache:
1246 1247 cache[arg] = func(arg)
1247 1248 return cache[arg]
1248 1249
1249 1250 else:
1250 1251
1251 1252 def f(*args):
1252 1253 if args not in cache:
1253 1254 cache[args] = func(*args)
1254 1255 return cache[args]
1255 1256
1256 1257 return f
1257 1258
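# Editorial sketch (not part of the diff): cachefunc memoizes by positional
# arguments; expensive() is a hypothetical pure function.
@cachefunc
def expensive(x):
    return x ** 2

expensive(4)  # computed once
expensive(4)  # served from the cache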
1258 1259
1259 1260 class cow:
1260 1261 """helper class to make copy-on-write easier
1261 1262
1262 1263 Call preparewrite before doing any writes.
1263 1264 """
1264 1265
1265 1266 def preparewrite(self):
1266 1267 """call this before writes, return self or a copied new object"""
1267 1268 if getattr(self, '_copied', 0):
1268 1269 self._copied -= 1
1269 1270 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1270 1271 return self.__class__(self) # pytype: disable=wrong-arg-count
1271 1272 return self
1272 1273
1273 1274 def copy(self):
1274 1275 """always do a cheap copy"""
1275 1276 self._copied = getattr(self, '_copied', 0) + 1
1276 1277 return self
1277 1278
1278 1279
1279 1280 class sortdict(collections.OrderedDict):
1280 1281 """a simple sorted dictionary
1281 1282
1282 1283 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1283 1284 >>> d2 = d1.copy()
1284 1285 >>> list(d2.items())
1285 1286 [('a', 0), ('b', 1)]
1286 1287 >>> d2.update([(b'a', 2)])
1287 1288 >>> list(d2.keys()) # should still be in last-set order
1288 1289 ['b', 'a']
1289 1290 >>> d1.insert(1, b'a.5', 0.5)
1290 1291 >>> list(d1.items())
1291 1292 [('a', 0), ('a.5', 0.5), ('b', 1)]
1292 1293 """
1293 1294
1294 1295 def __setitem__(self, key, value):
1295 1296 if key in self:
1296 1297 del self[key]
1297 1298 super(sortdict, self).__setitem__(key, value)
1298 1299
1299 1300 if pycompat.ispypy:
1300 1301 # __setitem__() isn't called as of PyPy 5.8.0
1301 1302 def update(self, src, **f):
1302 1303 if isinstance(src, dict):
1303 1304 src = src.items()
1304 1305 for k, v in src:
1305 1306 self[k] = v
1306 1307 for k in f:
1307 1308 self[k] = f[k]
1308 1309
1309 1310 def insert(self, position, key, value):
1310 1311 for (i, (k, v)) in enumerate(list(self.items())):
1311 1312 if i == position:
1312 1313 self[key] = value
1313 1314 if i >= position:
1314 1315 del self[k]
1315 1316 self[k] = v
1316 1317
1317 1318
1318 1319 class cowdict(cow, dict):
1319 1320 """copy-on-write dict
1320 1321
1321 1322 Be sure to call d = d.preparewrite() before writing to d.
1322 1323
1323 1324 >>> a = cowdict()
1324 1325 >>> a is a.preparewrite()
1325 1326 True
1326 1327 >>> b = a.copy()
1327 1328 >>> b is a
1328 1329 True
1329 1330 >>> c = b.copy()
1330 1331 >>> c is a
1331 1332 True
1332 1333 >>> a = a.preparewrite()
1333 1334 >>> b is a
1334 1335 False
1335 1336 >>> a is a.preparewrite()
1336 1337 True
1337 1338 >>> c = c.preparewrite()
1338 1339 >>> b is c
1339 1340 False
1340 1341 >>> b is b.preparewrite()
1341 1342 True
1342 1343 """
1343 1344
1344 1345
1345 1346 class cowsortdict(cow, sortdict):
1346 1347 """copy-on-write sortdict
1347 1348
1348 1349 Be sure to call d = d.preparewrite() before writing to d.
1349 1350 """
1350 1351
1351 1352
1352 1353 class transactional: # pytype: disable=ignored-metaclass
1353 1354 """Base class for making a transactional type into a context manager."""
1354 1355
1355 1356 __metaclass__ = abc.ABCMeta
1356 1357
1357 1358 @abc.abstractmethod
1358 1359 def close(self):
1359 1360 """Successfully closes the transaction."""
1360 1361
1361 1362 @abc.abstractmethod
1362 1363 def release(self):
1363 1364 """Marks the end of the transaction.
1364 1365
1365 1366 If the transaction has not been closed, it will be aborted.
1366 1367 """
1367 1368
1368 1369 def __enter__(self):
1369 1370 return self
1370 1371
1371 1372 def __exit__(self, exc_type, exc_val, exc_tb):
1372 1373 try:
1373 1374 if exc_type is None:
1374 1375 self.close()
1375 1376 finally:
1376 1377 self.release()
1377 1378
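# Editorial sketch (not part of the diff): a minimal transactional subclass;
# the commit/abort bodies are hypothetical stand-ins.
class demotransaction(transactional):
    def __init__(self):
        self._closed = False

    def close(self):
        self._closed = True  # commit the work

    def release(self):
        if not self._closed:
            pass  # an abort/rollback would happen here

with demotransaction():
    pass  # close() runs on success; release() runs either way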
1378 1379
1379 1380 @contextlib.contextmanager
1380 1381 def acceptintervention(tr=None):
1381 1382 """A context manager that closes the transaction on InterventionRequired
1382 1383
1383 1384 If no transaction was provided, this simply runs the body and returns
1384 1385 """
1385 1386 if not tr:
1386 1387 yield
1387 1388 return
1388 1389 try:
1389 1390 yield
1390 1391 tr.close()
1391 1392 except error.InterventionRequired:
1392 1393 tr.close()
1393 1394 raise
1394 1395 finally:
1395 1396 tr.release()
1396 1397
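# Editorial sketch (not part of the diff): with acceptintervention, the
# transaction is closed rather than aborted when InterventionRequired
# escapes; tr and resolve_step() are hypothetical.
with acceptintervention(tr):
    resolve_step()  # may raise error.InterventionRequired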
1397 1398
1398 1399 @contextlib.contextmanager
1399 1400 def nullcontextmanager(enter_result=None):
1400 1401 yield enter_result
1401 1402
1402 1403
1403 1404 class _lrucachenode:
1404 1405 """A node in a doubly linked list.
1405 1406
1406 1407 Holds a reference to nodes on either side as well as a key-value
1407 1408 pair for the dictionary entry.
1408 1409 """
1409 1410
1410 1411 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1411 1412
1412 1413 def __init__(self):
1413 1414 self.next = self
1414 1415 self.prev = self
1415 1416
1416 1417 self.key = _notset
1417 1418 self.value = None
1418 1419 self.cost = 0
1419 1420
1420 1421 def markempty(self):
1421 1422 """Mark the node as emptied."""
1422 1423 self.key = _notset
1423 1424 self.value = None
1424 1425 self.cost = 0
1425 1426
1426 1427
1427 1428 class lrucachedict:
1428 1429 """Dict that caches most recent accesses and sets.
1429 1430
1430 1431 The dict consists of an actual backing dict - indexed by original
1431 1432 key - and a doubly linked circular list defining the order of entries in
1432 1433 the cache.
1433 1434
1434 1435 The head node is the newest entry in the cache. If the cache is full,
1435 1436 we recycle head.prev and make it the new head. Cache accesses result in
1436 1437 the node being moved to before the existing head and being marked as the
1437 1438 new head node.
1438 1439
1439 1440 Items in the cache can be inserted with an optional "cost" value. This is
1440 1441 simply an integer that is specified by the caller. The cache can be queried
1441 1442 for the total cost of all items presently in the cache.
1442 1443
1443 1444 The cache can also define a maximum cost. If a cache insertion would
1444 1445 cause the total cost of the cache to go beyond the maximum cost limit,
1445 1446 nodes will be evicted to make room for the new node. This can be used
1446 1447 to e.g. set a max memory limit and associate an estimated bytes size
1447 1448 cost to each item in the cache. By default, no maximum cost is enforced.
1448 1449 """
1449 1450
1450 1451 def __init__(self, max, maxcost=0):
1451 1452 self._cache = {}
1452 1453
1453 1454 self._head = _lrucachenode()
1454 1455 self._size = 1
1455 1456 self.capacity = max
1456 1457 self.totalcost = 0
1457 1458 self.maxcost = maxcost
1458 1459
1459 1460 def __len__(self):
1460 1461 return len(self._cache)
1461 1462
1462 1463 def __contains__(self, k):
1463 1464 return k in self._cache
1464 1465
1465 1466 def __iter__(self):
1466 1467 # We don't have to iterate in cache order, but why not.
1467 1468 n = self._head
1468 1469 for i in range(len(self._cache)):
1469 1470 yield n.key
1470 1471 n = n.next
1471 1472
1472 1473 def __getitem__(self, k):
1473 1474 node = self._cache[k]
1474 1475 self._movetohead(node)
1475 1476 return node.value
1476 1477
1477 1478 def insert(self, k, v, cost=0):
1478 1479 """Insert a new item in the cache with optional cost value."""
1479 1480 node = self._cache.get(k)
1480 1481 # Replace existing value and mark as newest.
1481 1482 if node is not None:
1482 1483 self.totalcost -= node.cost
1483 1484 node.value = v
1484 1485 node.cost = cost
1485 1486 self.totalcost += cost
1486 1487 self._movetohead(node)
1487 1488
1488 1489 if self.maxcost:
1489 1490 self._enforcecostlimit()
1490 1491
1491 1492 return
1492 1493
1493 1494 if self._size < self.capacity:
1494 1495 node = self._addcapacity()
1495 1496 else:
1496 1497 # Grab the last/oldest item.
1497 1498 node = self._head.prev
1498 1499
1499 1500 # At capacity. Kill the old entry.
1500 1501 if node.key is not _notset:
1501 1502 self.totalcost -= node.cost
1502 1503 del self._cache[node.key]
1503 1504
1504 1505 node.key = k
1505 1506 node.value = v
1506 1507 node.cost = cost
1507 1508 self.totalcost += cost
1508 1509 self._cache[k] = node
1509 1510 # And mark it as newest entry. No need to adjust order since it
1510 1511 # is already self._head.prev.
1511 1512 self._head = node
1512 1513
1513 1514 if self.maxcost:
1514 1515 self._enforcecostlimit()
1515 1516
1516 1517 def __setitem__(self, k, v):
1517 1518 self.insert(k, v)
1518 1519
1519 1520 def __delitem__(self, k):
1520 1521 self.pop(k)
1521 1522
1522 1523 def pop(self, k, default=_notset):
1523 1524 try:
1524 1525 node = self._cache.pop(k)
1525 1526 except KeyError:
1526 1527 if default is _notset:
1527 1528 raise
1528 1529 return default
1529 1530
1530 1531 value = node.value
1531 1532 self.totalcost -= node.cost
1532 1533 node.markempty()
1533 1534
1534 1535 # Temporarily mark as newest item before re-adjusting head to make
1535 1536 # this node the oldest item.
1536 1537 self._movetohead(node)
1537 1538 self._head = node.next
1538 1539
1539 1540 return value
1540 1541
1541 1542 # Additional dict methods.
1542 1543
1543 1544 def get(self, k, default=None):
1544 1545 try:
1545 1546 return self.__getitem__(k)
1546 1547 except KeyError:
1547 1548 return default
1548 1549
1549 1550 def peek(self, k, default=_notset):
1550 1551 """Get the specified item without moving it to the head
1551 1552
1552 1553 Unlike get(), this doesn't mutate the internal state. But be aware
1553 1554 that this does not make peek() thread safe.
1554 1555 """
1555 1556 try:
1556 1557 node = self._cache[k]
1557 1558 return node.value
1558 1559 except KeyError:
1559 1560 if default is _notset:
1560 1561 raise
1561 1562 return default
1562 1563
1563 1564 def clear(self):
1564 1565 n = self._head
1565 1566 while n.key is not _notset:
1566 1567 self.totalcost -= n.cost
1567 1568 n.markempty()
1568 1569 n = n.next
1569 1570
1570 1571 self._cache.clear()
1571 1572
1572 1573 def copy(self, capacity=None, maxcost=0):
1573 1574 """Create a new cache as a copy of the current one.
1574 1575
1575 1576 By default, the new cache has the same capacity as the existing one.
1576 1577 But, the cache capacity can be changed as part of performing the
1577 1578 copy.
1578 1579
1579 1580 Items in the copy have an insertion/access order matching this
1580 1581 instance.
1581 1582 """
1582 1583
1583 1584 capacity = capacity or self.capacity
1584 1585 maxcost = maxcost or self.maxcost
1585 1586 result = lrucachedict(capacity, maxcost=maxcost)
1586 1587
1587 1588 # We copy entries by iterating in oldest-to-newest order so the copy
1588 1589 # has the correct ordering.
1589 1590
1590 1591 # Find the first non-empty entry.
1591 1592 n = self._head.prev
1592 1593 while n.key is _notset and n is not self._head:
1593 1594 n = n.prev
1594 1595
1595 1596 # We could potentially skip the first N items when decreasing capacity.
1596 1597 # But let's keep it simple unless it is a performance problem.
1597 1598 for i in range(len(self._cache)):
1598 1599 result.insert(n.key, n.value, cost=n.cost)
1599 1600 n = n.prev
1600 1601
1601 1602 return result
1602 1603
1603 1604 def popoldest(self):
1604 1605 """Remove the oldest item from the cache.
1605 1606
1606 1607 Returns the (key, value) describing the removed cache entry.
1607 1608 """
1608 1609 if not self._cache:
1609 1610 return
1610 1611
1611 1612 # Walk the linked list backwards starting at tail node until we hit
1612 1613 # a non-empty node.
1613 1614 n = self._head.prev
1614 1615
1615 1616 while n.key is _notset:
1616 1617 n = n.prev
1617 1618
1618 1619 key, value = n.key, n.value
1619 1620
1620 1621 # And remove it from the cache and mark it as empty.
1621 1622 del self._cache[n.key]
1622 1623 self.totalcost -= n.cost
1623 1624 n.markempty()
1624 1625
1625 1626 return key, value
1626 1627
1627 1628 def _movetohead(self, node: _lrucachenode):
1628 1629 """Mark a node as the newest, making it the new head.
1629 1630
1630 1631 When a node is accessed, it becomes the freshest entry in the LRU
1631 1632 list, which is denoted by self._head.
1632 1633
1633 1634 Visually, let's make ``N`` the new head node (* denotes head):
1634 1635
1635 1636 previous/oldest <-> head <-> next/next newest
1636 1637
1637 1638 ----<->--- A* ---<->-----
1638 1639 | |
1639 1640 E <-> D <-> N <-> C <-> B
1640 1641
1641 1642 To:
1642 1643
1643 1644 ----<->--- N* ---<->-----
1644 1645 | |
1645 1646 E <-> D <-> C <-> B <-> A
1646 1647
1647 1648 This requires the following moves:
1648 1649
1649 1650 C.next = D (node.prev.next = node.next)
1650 1651 D.prev = C (node.next.prev = node.prev)
1651 1652 E.next = N (head.prev.next = node)
1652 1653 N.prev = E (node.prev = head.prev)
1653 1654 N.next = A (node.next = head)
1654 1655 A.prev = N (head.prev = node)
1655 1656 """
1656 1657 head = self._head
1657 1658 # C.next = D
1658 1659 node.prev.next = node.next
1659 1660 # D.prev = C
1660 1661 node.next.prev = node.prev
1661 1662 # N.prev = E
1662 1663 node.prev = head.prev
1663 1664 # N.next = A
1664 1665 # It is tempting to do just "head" here; however, if node is
1665 1666 # adjacent to head, this will do bad things.
1666 1667 node.next = head.prev.next
1667 1668 # E.next = N
1668 1669 node.next.prev = node
1669 1670 # A.prev = N
1670 1671 node.prev.next = node
1671 1672
1672 1673 self._head = node
1673 1674
1674 1675 def _addcapacity(self) -> _lrucachenode:
1675 1676 """Add a node to the circular linked list.
1676 1677
1677 1678 The new node is inserted before the head node.
1678 1679 """
1679 1680 head = self._head
1680 1681 node = _lrucachenode()
1681 1682 head.prev.next = node
1682 1683 node.prev = head.prev
1683 1684 node.next = head
1684 1685 head.prev = node
1685 1686 self._size += 1
1686 1687 return node
1687 1688
1688 1689 def _enforcecostlimit(self):
1689 1690 # This should run after an insertion. It should only be called if total
1690 1691 # cost limits are being enforced.
1691 1692 # The most recently inserted node is never evicted.
1692 1693 if len(self) <= 1 or self.totalcost <= self.maxcost:
1693 1694 return
1694 1695
1695 1696 # This is logically equivalent to calling popoldest() until we
1696 1697 # free up enough cost. We don't do that since popoldest() needs
1697 1698 # to walk the linked list and doing this in a loop would be
1698 1699 # quadratic. So we find the first non-empty node and then
1699 1700 # walk nodes until we free up enough capacity.
1700 1701 #
1701 1702 # If we only removed the minimum number of nodes to free enough
1702 1703 # cost at insert time, chances are high that the next insert would
1703 1704 # also require pruning. This would effectively constitute quadratic
1704 1705 # behavior for insert-heavy workloads. To mitigate this, we set a
1705 1706 # target cost that is a percentage of the max cost. This will tend
1706 1707 # to free more nodes when the high water mark is reached, which
1707 1708 # lowers the chances of needing to prune on the subsequent insert.
1708 1709 targetcost = int(self.maxcost * 0.75)
1709 1710
1710 1711 n = self._head.prev
1711 1712 while n.key is _notset:
1712 1713 n = n.prev
1713 1714
1714 1715 while len(self) > 1 and self.totalcost > targetcost:
1715 1716 del self._cache[n.key]
1716 1717 self.totalcost -= n.cost
1717 1718 n.markempty()
1718 1719 n = n.prev
1719 1720
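# Editorial sketch (not part of the diff): cost-based eviction in
# lrucachedict; keys, values, and costs are hypothetical.
d = lrucachedict(4, maxcost=100)
d.insert(b'a', b'v1', cost=60)
d.insert(b'b', b'v2', cost=60)  # total cost 120 > 100: b'a' is evicted
assert b'a' not in d and b'b' in d
d[b'b']  # a hit also marks b'b' as the most recently used entry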
1720 1721
1721 1722 def lrucachefunc(func):
1722 1723 '''cache most recent results of function calls'''
1723 1724 cache = {}
1724 1725 order = collections.deque()
1725 1726 if func.__code__.co_argcount == 1:
1726 1727
1727 1728 def f(arg):
1728 1729 if arg not in cache:
1729 1730 if len(cache) > 20:
1730 1731 del cache[order.popleft()]
1731 1732 cache[arg] = func(arg)
1732 1733 else:
1733 1734 order.remove(arg)
1734 1735 order.append(arg)
1735 1736 return cache[arg]
1736 1737
1737 1738 else:
1738 1739
1739 1740 def f(*args):
1740 1741 if args not in cache:
1741 1742 if len(cache) > 20:
1742 1743 del cache[order.popleft()]
1743 1744 cache[args] = func(*args)
1744 1745 else:
1745 1746 order.remove(args)
1746 1747 order.append(args)
1747 1748 return cache[args]
1748 1749
1749 1750 return f
1750 1751
1751 1752
1752 1753 class propertycache:
1753 1754 def __init__(self, func):
1754 1755 self.func = func
1755 1756 self.name = func.__name__
1756 1757
1757 1758 def __get__(self, obj, type=None):
1758 1759 result = self.func(obj)
1759 1760 self.cachevalue(obj, result)
1760 1761 return result
1761 1762
1762 1763 def cachevalue(self, obj, value):
1763 1764 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1764 1765 obj.__dict__[self.name] = value
1765 1766
1766 1767
1767 1768 def clearcachedproperty(obj, prop):
1768 1769 '''clear a cached property value, if one has been set'''
1769 1770 prop = pycompat.sysstr(prop)
1770 1771 if prop in obj.__dict__:
1771 1772 del obj.__dict__[prop]
1772 1773
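# Editorial sketch (not part of the diff): propertycache computes once and
# then the instance attribute shadows the descriptor; compute() is
# hypothetical.
class thing:
    @propertycache
    def expensive(self):
        return compute()  # runs only on the first access

t = thing()
t.expensive  # computed, then stored in t.__dict__
t.expensive  # plain attribute lookup; the descriptor is no longer consulted
clearcachedproperty(t, b'expensive')  # drops the cached value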
1773 1774
1774 1775 def increasingchunks(source, min=1024, max=65536):
1775 1776 """return no less than min bytes per chunk while data remains,
1776 1777 doubling min after each chunk until it reaches max"""
1777 1778
1778 1779 def log2(x):
1779 1780 if not x:
1780 1781 return 0
1781 1782 i = 0
1782 1783 while x:
1783 1784 x >>= 1
1784 1785 i += 1
1785 1786 return i - 1
1786 1787
1787 1788 buf = []
1788 1789 blen = 0
1789 1790 for chunk in source:
1790 1791 buf.append(chunk)
1791 1792 blen += len(chunk)
1792 1793 if blen >= min:
1793 1794 if min < max:
1794 1795 min = min << 1
1795 1796 nmin = 1 << log2(blen)
1796 1797 if nmin > min:
1797 1798 min = nmin
1798 1799 if min > max:
1799 1800 min = max
1800 1801 yield b''.join(buf)
1801 1802 blen = 0
1802 1803 buf = []
1803 1804 if buf:
1804 1805 yield b''.join(buf)
1805 1806
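# Editorial sketch (not part of the diff): regrouping eight 1 KiB pieces with
# the default min/max; chunk sizes double until the input is exhausted.
sizes = [len(c) for c in increasingchunks(iter([b'x' * 1024] * 8))]
# sizes == [1024, 2048, 4096, 1024]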
1806 1807
1807 1808 def always(fn):
1808 1809 return True
1809 1810
1810 1811
1811 1812 def never(fn):
1812 1813 return False
1813 1814
1814 1815
1815 def nogc(func):
1816 def nogc(func=None) -> Any:
1816 1817 """disable garbage collector
1817 1818
1818 1819 Python's garbage collector triggers a GC each time a certain number of
1819 1820 container objects (the number being defined by gc.get_threshold()) are
1820 1821 allocated even when marked not to be tracked by the collector. Tracking has
1821 1822 no effect on when GCs are triggered, only on what objects the GC looks
1822 1823 into. As a workaround, disable GC while building complex (huge)
1823 1824 containers.
1824 1825
1825 1826 This garbage collector issue has been fixed in 2.7, but it still affects
1826 1827 CPython's performance.
1827 1828 """
1828
1829 if func is None:
1830 return _nogc_context()
1831 else:
1832 return _nogc_decorator(func)
1833
1834
1835 @contextlib.contextmanager
1836 def _nogc_context():
1837 gcenabled = gc.isenabled()
1838 gc.disable()
1839 try:
1840 yield
1841 finally:
1842 if gcenabled:
1843 gc.enable()
1844
1845
1846 def _nogc_decorator(func):
1829 1847 def wrapper(*args, **kwargs):
1830 gcenabled = gc.isenabled()
1831 gc.disable()
1832 try:
1848 with _nogc_context():
1833 1849 return func(*args, **kwargs)
1834 finally:
1835 if gcenabled:
1836 gc.enable()
1837 1850
1838 1851 return wrapper
1839 1852
1840 1853
1841 1854 if pycompat.ispypy:
1842 1855 # PyPy runs slower with gc disabled
1843 1856 nogc = lambda x: x
1844 1857
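With this change, nogc can be used either way on CPython (the functions below are invented for illustration; note that the PyPy override above keeps only the one-argument decorator form):

    @nogc                    # decorator form: gc stays off for each call
    def build_index(items):
        return {key: i for i, key in enumerate(items)}

    with nogc():             # context-manager form, via _nogc_context()
        table = {i: i * 2 for i in range(100000)}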
1845 1858
1846 1859 def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
1847 1860 """return the relative path from one place to another.
1848 1861 root should use os.sep to separate directories
1849 1862 n1 should use os.sep to separate directories
1850 1863 n2 should use "/" to separate directories
1851 1864 returns an os.sep-separated path.
1852 1865
1853 1866 If n1 is a relative path, it's assumed it's
1854 1867 relative to root.
1855 1868 n2 should always be relative to root.
1856 1869 """
1857 1870 if not n1:
1858 1871 return localpath(n2)
1859 1872 if os.path.isabs(n1):
1860 1873 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1861 1874 return os.path.join(root, localpath(n2))
1862 1875 n2 = b'/'.join((pconvert(root), n2))
1863 1876 a, b = splitpath(n1), n2.split(b'/')
1864 1877 a.reverse()
1865 1878 b.reverse()
1866 1879 while a and b and a[-1] == b[-1]:
1867 1880 a.pop()
1868 1881 b.pop()
1869 1882 b.reverse()
1870 1883 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1871 1884
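A worked example on POSIX-style paths (values invented): with both names relative to the root, the common prefix is stripped and the remainder is reached via '..' components.

    pathto(b'/repo', b'a/b', b'a/c/d')
    # -> b'../c/d': 'a' is common, one step up from 'b', then down into 'c/d'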
1872 1885
1873 1886 def checksignature(func, depth=1):
1874 1887 '''wrap a function with code to check for calling errors'''
1875 1888
1876 1889 def check(*args, **kwargs):
1877 1890 try:
1878 1891 return func(*args, **kwargs)
1879 1892 except TypeError:
1880 1893 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1881 1894 raise error.SignatureError
1882 1895 raise
1883 1896
1884 1897 return check
1885 1898
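A sketch of what the wrapper distinguishes (function invented for illustration): a TypeError raised by the call itself, at exactly the expected traceback depth, becomes a SignatureError, while a TypeError from inside the body still propagates unchanged.

    def _add(a, b):
        return a + b

    checked = checksignature(_add)
    checked(1, 2)   # -> 3
    checked(1)      # raises error.SignatureError: wrong calling convention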
1886 1899
1887 1900 # a whitelist of known filesystems where hardlinks work reliably
1888 1901 _hardlinkfswhitelist = {
1889 1902 b'apfs',
1890 1903 b'btrfs',
1891 1904 b'ext2',
1892 1905 b'ext3',
1893 1906 b'ext4',
1894 1907 b'hfs',
1895 1908 b'jfs',
1896 1909 b'NTFS',
1897 1910 b'reiserfs',
1898 1911 b'tmpfs',
1899 1912 b'ufs',
1900 1913 b'xfs',
1901 1914 b'zfs',
1902 1915 }
1903 1916
1904 1917
1905 1918 def copyfile(
1906 1919 src,
1907 1920 dest,
1908 1921 hardlink=False,
1909 1922 copystat=False,
1910 1923 checkambig=False,
1911 1924 nb_bytes=None,
1912 1925 no_hardlink_cb=None,
1913 1926 check_fs_hardlink=True,
1914 1927 ):
1915 1928 """copy a file, preserving mode and optionally other stat info like
1916 1929 atime/mtime
1917 1930
1918 1931 checkambig argument is used with filestat, and is useful only if
1919 1932 destination file is guarded by any lock (e.g. repo.lock or
1920 1933 repo.wlock).
1921 1934
1922 1935 copystat and checkambig should be exclusive.
1923 1936
1924 1937 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1925 1938 """
1926 1939 assert not (copystat and checkambig)
1927 1940 oldstat = None
1928 1941 if os.path.lexists(dest):
1929 1942 if checkambig:
1930 1943 oldstat = checkambig and filestat.frompath(dest)
1931 1944 unlink(dest)
1932 1945 if hardlink and check_fs_hardlink:
1933 1946 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1934 1947 # unless we are confident that dest is on a whitelisted filesystem.
1935 1948 try:
1936 1949 fstype = getfstype(os.path.dirname(dest))
1937 1950 except OSError:
1938 1951 fstype = None
1939 1952 if fstype not in _hardlinkfswhitelist:
1940 1953 if no_hardlink_cb is not None:
1941 1954 no_hardlink_cb()
1942 1955 hardlink = False
1943 1956 if hardlink:
1944 1957 try:
1945 1958 oslink(src, dest)
1946 1959 if nb_bytes is not None:
1947 1960 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1948 1961 raise error.ProgrammingError(m)
1949 1962 return
1950 1963 except (IOError, OSError) as exc:
1951 1964 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1952 1965 no_hardlink_cb()
1953 1966 # fall back to normal copy
1954 1967 if os.path.islink(src):
1955 1968 os.symlink(os.readlink(src), dest)
1956 1969 # copytime is ignored for symlinks, but in general copytime isn't needed
1957 1970 # for them anyway
1958 1971 if nb_bytes is not None:
1959 1972 m = "cannot use `nb_bytes` on a symlink"
1960 1973 raise error.ProgrammingError(m)
1961 1974 else:
1962 1975 try:
1963 1976 shutil.copyfile(src, dest)
1964 1977 if copystat:
1965 1978 # copystat also copies mode
1966 1979 shutil.copystat(src, dest)
1967 1980 else:
1968 1981 shutil.copymode(src, dest)
1969 1982 if oldstat and oldstat.stat:
1970 1983 newstat = filestat.frompath(dest)
1971 1984 if newstat.isambig(oldstat):
1972 1985 # stat of copied file is ambiguous to original one
1973 1986 advanced = (
1974 1987 oldstat.stat[stat.ST_MTIME] + 1
1975 1988 ) & 0x7FFFFFFF
1976 1989 os.utime(dest, (advanced, advanced))
1977 1990 # We could do something smarter using `copy_file_range` call or similar
1978 1991 if nb_bytes is not None:
1979 1992 with open(dest, mode='r+') as f:
1980 1993 f.truncate(nb_bytes)
1981 1994 except shutil.Error as inst:
1982 1995 raise error.Abort(stringutil.forcebytestr(inst))
1983 1996
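A typical call might look like the following sketch (paths and the callback are invented); the callback fires when the hardlink request is downgraded to a plain copy, e.g. because the destination filesystem is not whitelisted.

    def _note_fallback():
        pass  # e.g. log that a copy was made instead of a hardlink

    copyfile(
        b'/tmp/src.bin',
        b'/tmp/dst.bin',
        hardlink=True,
        no_hardlink_cb=_note_fallback,
    )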
1984 1997
1985 1998 def copyfiles(src, dst, hardlink=None, progress=None):
1986 1999 """Copy a directory tree using hardlinks if possible."""
1987 2000 num = 0
1988 2001
1989 2002 def settopic():
1990 2003 if progress:
1991 2004 progress.topic = _(b'linking') if hardlink else _(b'copying')
1992 2005
1993 2006 if os.path.isdir(src):
1994 2007 if hardlink is None:
1995 2008 hardlink = (
1996 2009 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1997 2010 )
1998 2011 settopic()
1999 2012 os.mkdir(dst)
2000 2013 for name, kind in listdir(src):
2001 2014 srcname = os.path.join(src, name)
2002 2015 dstname = os.path.join(dst, name)
2003 2016 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2004 2017 num += n
2005 2018 else:
2006 2019 if hardlink is None:
2007 2020 hardlink = (
2008 2021 os.stat(os.path.dirname(src)).st_dev
2009 2022 == os.stat(os.path.dirname(dst)).st_dev
2010 2023 )
2011 2024 settopic()
2012 2025
2013 2026 if hardlink:
2014 2027 try:
2015 2028 oslink(src, dst)
2016 2029 except (IOError, OSError) as exc:
2017 2030 if exc.errno != errno.EEXIST:
2018 2031 hardlink = False
2019 2032 # XXX maybe try to relink if the file exists?
2020 2033 shutil.copy(src, dst)
2021 2034 else:
2022 2035 shutil.copy(src, dst)
2023 2036 num += 1
2024 2037 if progress:
2025 2038 progress.increment()
2026 2039
2027 2040 return hardlink, num
2028 2041
2029 2042
2030 2043 _winreservednames = {
2031 2044 b'con',
2032 2045 b'prn',
2033 2046 b'aux',
2034 2047 b'nul',
2035 2048 b'com1',
2036 2049 b'com2',
2037 2050 b'com3',
2038 2051 b'com4',
2039 2052 b'com5',
2040 2053 b'com6',
2041 2054 b'com7',
2042 2055 b'com8',
2043 2056 b'com9',
2044 2057 b'lpt1',
2045 2058 b'lpt2',
2046 2059 b'lpt3',
2047 2060 b'lpt4',
2048 2061 b'lpt5',
2049 2062 b'lpt6',
2050 2063 b'lpt7',
2051 2064 b'lpt8',
2052 2065 b'lpt9',
2053 2066 }
2054 2067 _winreservedchars = b':*?"<>|'
2055 2068
2056 2069
2057 2070 def checkwinfilename(path: bytes) -> Optional[bytes]:
2058 2071 r"""Check that the base-relative path is a valid filename on Windows.
2059 2072 Returns None if the path is ok, or a UI string describing the problem.
2060 2073
2061 2074 >>> checkwinfilename(b"just/a/normal/path")
2062 2075 >>> checkwinfilename(b"foo/bar/con.xml")
2063 2076 "filename contains 'con', which is reserved on Windows"
2064 2077 >>> checkwinfilename(b"foo/con.xml/bar")
2065 2078 "filename contains 'con', which is reserved on Windows"
2066 2079 >>> checkwinfilename(b"foo/bar/xml.con")
2067 2080 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2068 2081 "filename contains 'AUX', which is reserved on Windows"
2069 2082 >>> checkwinfilename(b"foo/bar/bla:.txt")
2070 2083 "filename contains ':', which is reserved on Windows"
2071 2084 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2072 2085 "filename contains '\\x07', which is invalid on Windows"
2073 2086 >>> checkwinfilename(b"foo/bar/bla ")
2074 2087 "filename ends with ' ', which is not allowed on Windows"
2075 2088 >>> checkwinfilename(b"../bar")
2076 2089 >>> checkwinfilename(b"foo\\")
2077 2090 "filename ends with '\\', which is invalid on Windows"
2078 2091 >>> checkwinfilename(b"foo\\/bar")
2079 2092 "directory name ends with '\\', which is invalid on Windows"
2080 2093 """
2081 2094 if path.endswith(b'\\'):
2082 2095 return _(b"filename ends with '\\', which is invalid on Windows")
2083 2096 if b'\\/' in path:
2084 2097 return _(b"directory name ends with '\\', which is invalid on Windows")
2085 2098 for n in path.replace(b'\\', b'/').split(b'/'):
2086 2099 if not n:
2087 2100 continue
2088 2101 for c in _filenamebytestr(n):
2089 2102 if c in _winreservedchars:
2090 2103 return (
2091 2104 _(
2092 2105 b"filename contains '%s', which is reserved "
2093 2106 b"on Windows"
2094 2107 )
2095 2108 % c
2096 2109 )
2097 2110 if ord(c) <= 31:
2098 2111 return _(
2099 2112 b"filename contains '%s', which is invalid on Windows"
2100 2113 ) % stringutil.escapestr(c)
2101 2114 base = n.split(b'.')[0]
2102 2115 if base and base.lower() in _winreservednames:
2103 2116 return (
2104 2117 _(b"filename contains '%s', which is reserved on Windows")
2105 2118 % base
2106 2119 )
2107 2120 t = n[-1:]
2108 2121 if t in b'. ' and n not in b'..':
2109 2122 return (
2110 2123 _(
2111 2124 b"filename ends with '%s', which is not allowed "
2112 2125 b"on Windows"
2113 2126 )
2114 2127 % t
2115 2128 )
2116 2129
2117 2130
2118 2131 timer = getattr(time, "perf_counter", None)
2119 2132
2120 2133 if pycompat.iswindows:
2121 2134 checkosfilename = checkwinfilename
2122 2135 if not timer:
2123 2136 timer = time.clock # pytype: disable=module-attr
2124 2137 else:
2125 2138 # mercurial.windows doesn't have platform.checkosfilename
2126 2139 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2127 2140 if not timer:
2128 2141 timer = time.time
2129 2142
2130 2143
2131 2144 def makelock(info, pathname):
2132 2145 """Create a lock file atomically if possible
2133 2146
2134 2147 This may leave a stale lock file if symlink isn't supported and signal
2135 2148 interrupt is enabled.
2136 2149 """
2137 2150 try:
2138 2151 return os.symlink(info, pathname)
2139 2152 except OSError as why:
2140 2153 if why.errno == errno.EEXIST:
2141 2154 raise
2142 2155 except AttributeError: # no symlink in os
2143 2156 pass
2144 2157
2145 2158 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2146 2159 ld = os.open(pathname, flags)
2147 2160 os.write(ld, info)
2148 2161 os.close(ld)
2149 2162
2150 2163
2151 2164 def readlock(pathname: bytes) -> bytes:
2152 2165 try:
2153 2166 return readlink(pathname)
2154 2167 except OSError as why:
2155 2168 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2156 2169 raise
2157 2170 except AttributeError: # no symlink in os
2158 2171 pass
2159 2172 with posixfile(pathname, b'rb') as fp:
2160 2173 return fp.read()
2161 2174
2162 2175
2163 2176 def fstat(fp):
2164 2177 '''stat file object that may not have fileno method.'''
2165 2178 try:
2166 2179 return os.fstat(fp.fileno())
2167 2180 except AttributeError:
2168 2181 return os.stat(fp.name)
2169 2182
2170 2183
2171 2184 # File system features
2172 2185
2173 2186
2174 2187 def fscasesensitive(path: bytes) -> bool:
2175 2188 """
2176 2189 Return true if the given path is on a case-sensitive filesystem
2177 2190
2178 2191 Requires a path (like /foo/.hg) ending with a foldable final
2179 2192 directory component.
2180 2193 """
2181 2194 s1 = os.lstat(path)
2182 2195 d, b = os.path.split(path)
2183 2196 b2 = b.upper()
2184 2197 if b == b2:
2185 2198 b2 = b.lower()
2186 2199 if b == b2:
2187 2200 return True # no evidence against case sensitivity
2188 2201 p2 = os.path.join(d, b2)
2189 2202 try:
2190 2203 s2 = os.lstat(p2)
2191 2204 if s2 == s1:
2192 2205 return False
2193 2206 return True
2194 2207 except OSError:
2195 2208 return True
2196 2209
2197 2210
2198 2211 _re2_input = lambda x: x
2199 2212 try:
2200 2213 import re2 # pytype: disable=import-error
2201 2214
2202 2215 _re2 = None
2203 2216 except ImportError:
2204 2217 _re2 = False
2205 2218
2206 2219
2207 2220 def has_re2():
2208 2221 """return True is re2 is available, False otherwise"""
2209 2222 if _re2 is None:
2210 2223 _re._checkre2()
2211 2224 return _re2
2212 2225
2213 2226
2214 2227 class _re:
2215 2228 @staticmethod
2216 2229 def _checkre2():
2217 2230 global _re2
2218 2231 global _re2_input
2219 2232 if _re2 is not None:
2220 2233 # we already have the answer
2221 2234 return
2222 2235
2223 2236 check_pattern = br'\[([^\[]+)\]'
2224 2237 check_input = b'[ui]'
2225 2238 try:
2226 2239 # check if match works, see issue3964
2227 2240 _re2 = bool(re2.match(check_pattern, check_input))
2228 2241 except ImportError:
2229 2242 _re2 = False
2230 2243 except TypeError:
2231 2244 # the `pyre-2` project provides a re2 module that accepts bytes
2232 2245 # the `fb-re2` project provides a re2 module that accepts sysstr
2233 2246 check_pattern = pycompat.sysstr(check_pattern)
2234 2247 check_input = pycompat.sysstr(check_input)
2235 2248 _re2 = bool(re2.match(check_pattern, check_input))
2236 2249 _re2_input = pycompat.sysstr
2237 2250
2238 2251 def compile(self, pat, flags=0):
2239 2252 """Compile a regular expression, using re2 if possible
2240 2253
2241 2254 For best performance, use only re2-compatible regexp features. The
2242 2255 only flags from the re module that are re2-compatible are
2243 2256 IGNORECASE and MULTILINE."""
2244 2257 if _re2 is None:
2245 2258 self._checkre2()
2246 2259 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2247 2260 if flags & remod.IGNORECASE:
2248 2261 pat = b'(?i)' + pat
2249 2262 if flags & remod.MULTILINE:
2250 2263 pat = b'(?m)' + pat
2251 2264 try:
2252 2265 return re2.compile(_re2_input(pat))
2253 2266 except re2.error:
2254 2267 pass
2255 2268 return remod.compile(pat, flags)
2256 2269
2257 2270 @propertycache
2258 2271 def escape(self):
2259 2272 """Return the version of escape corresponding to self.compile.
2260 2273
2261 2274 This is imperfect because whether re2 or re is used for a particular
2262 2275 function depends on the flags, etc, but it's the best we can do.
2263 2276 """
2264 2277 global _re2
2265 2278 if _re2 is None:
2266 2279 self._checkre2()
2267 2280 if _re2:
2268 2281 return re2.escape
2269 2282 else:
2270 2283 return remod.escape
2271 2284
2272 2285
2273 2286 re = _re()
2274 2287
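Callers go through this module-level instance rather than the stdlib directly; a small sketch (pattern invented): re2 is used when it is available and the flags are compatible, otherwise the call transparently falls back to remod.

    pat = re.compile(br'^[a-z]+$', remod.IGNORECASE)
    pat.match(b'Hello')   # same match API whichever engine was chosen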
2275 2288 _fspathcache = {}
2276 2289
2277 2290
2278 2291 def fspath(name: bytes, root: bytes) -> bytes:
2279 2292 """Get name in the case stored in the filesystem
2280 2293
2281 2294 The name should be relative to root, and be normcase-ed for efficiency.
2282 2295
2283 2296 Note that this function is unnecessary, and should not be
2284 2297 called, for case-sensitive filesystems (simply because it's expensive).
2285 2298
2286 2299 The root should be normcase-ed, too.
2287 2300 """
2288 2301
2289 2302 def _makefspathcacheentry(dir):
2290 2303 return {normcase(n): n for n in os.listdir(dir)}
2291 2304
2292 2305 seps = pycompat.ossep
2293 2306 if pycompat.osaltsep:
2294 2307 seps = seps + pycompat.osaltsep
2295 2308 # Protect backslashes. This gets silly very quickly.
2296 2309 seps.replace(b'\\', b'\\\\')
2297 2310 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2298 2311 dir = os.path.normpath(root)
2299 2312 result = []
2300 2313 for part, sep in pattern.findall(name):
2301 2314 if sep:
2302 2315 result.append(sep)
2303 2316 continue
2304 2317
2305 2318 if dir not in _fspathcache:
2306 2319 _fspathcache[dir] = _makefspathcacheentry(dir)
2307 2320 contents = _fspathcache[dir]
2308 2321
2309 2322 found = contents.get(part)
2310 2323 if not found:
2311 2324 # retry "once per directory" per "dirstate.walk" which
2312 2325 # may take place for each patch of "hg qpush", for example
2313 2326 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2314 2327 found = contents.get(part)
2315 2328
2316 2329 result.append(found or part)
2317 2330 dir = os.path.join(dir, part)
2318 2331
2319 2332 return b''.join(result)
2320 2333
2321 2334
2322 2335 def checknlink(testfile: bytes) -> bool:
2323 2336 '''check whether hardlink count reporting works properly'''
2324 2337
2325 2338 # testfile may be open, so we need a separate file for checking to
2326 2339 # work around issue2543 (or testfile may get lost on Samba shares)
2327 2340 f1, f2, fp = None, None, None
2328 2341 try:
2329 2342 fd, f1 = pycompat.mkstemp(
2330 2343 prefix=b'.%s-' % os.path.basename(testfile),
2331 2344 suffix=b'1~',
2332 2345 dir=os.path.dirname(testfile),
2333 2346 )
2334 2347 os.close(fd)
2335 2348 f2 = b'%s2~' % f1[:-2]
2336 2349
2337 2350 oslink(f1, f2)
2338 2351 # nlinks() may behave differently for files on Windows shares if
2339 2352 # the file is open.
2340 2353 fp = posixfile(f2)
2341 2354 return nlinks(f2) > 1
2342 2355 except OSError:
2343 2356 return False
2344 2357 finally:
2345 2358 if fp is not None:
2346 2359 fp.close()
2347 2360 for f in (f1, f2):
2348 2361 try:
2349 2362 if f is not None:
2350 2363 os.unlink(f)
2351 2364 except OSError:
2352 2365 pass
2353 2366
2354 2367
2355 2368 def endswithsep(path: bytes) -> bool:
2356 2369 '''Check path ends with os.sep or os.altsep.'''
2357 2370 return bool( # help pytype
2358 2371 path.endswith(pycompat.ossep)
2359 2372 or pycompat.osaltsep
2360 2373 and path.endswith(pycompat.osaltsep)
2361 2374 )
2362 2375
2363 2376
2364 2377 def splitpath(path: bytes) -> List[bytes]:
2365 2378 """Split path by os.sep.
2366 2379 Note that this function does not use os.altsep because this is
2367 2380 an alternative of simple "xxx.split(os.sep)".
2368 2381 It is recommended to use os.path.normpath() before using this
2369 2382 function if needed."""
2370 2383 return path.split(pycompat.ossep)
2371 2384
2372 2385
2373 2386 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2374 2387 """Create a temporary file with the same contents from name
2375 2388
2376 2389 The permission bits are copied from the original file.
2377 2390
2378 2391 If the temporary file is going to be truncated immediately, you
2379 2392 can use emptyok=True as an optimization.
2380 2393
2381 2394 Returns the name of the temporary file.
2382 2395 """
2383 2396 d, fn = os.path.split(name)
2384 2397 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2385 2398 os.close(fd)
2386 2399 # Temporary files are created with mode 0600, which is usually not
2387 2400 # what we want. If the original file already exists, just copy
2388 2401 # its mode. Otherwise, manually obey umask.
2389 2402 copymode(name, temp, createmode, enforcewritable)
2390 2403
2391 2404 if emptyok:
2392 2405 return temp
2393 2406 try:
2394 2407 try:
2395 2408 ifp = posixfile(name, b"rb")
2396 2409 except IOError as inst:
2397 2410 if inst.errno == errno.ENOENT:
2398 2411 return temp
2399 2412 if not getattr(inst, 'filename', None):
2400 2413 inst.filename = name
2401 2414 raise
2402 2415 ofp = posixfile(temp, b"wb")
2403 2416 for chunk in filechunkiter(ifp):
2404 2417 ofp.write(chunk)
2405 2418 ifp.close()
2406 2419 ofp.close()
2407 2420 except: # re-raises
2408 2421 try:
2409 2422 os.unlink(temp)
2410 2423 except OSError:
2411 2424 pass
2412 2425 raise
2413 2426 return temp
2414 2427
2415 2428
2416 2429 class filestat:
2417 2430 """help to exactly detect change of a file
2418 2431
2419 2432 'stat' attribute is result of 'os.stat()' if specified 'path'
2420 2433 exists. Otherwise, it is None. This can avoid preparative
2421 2434 'exists()' examination on client side of this class.
2422 2435 """
2423 2436
2424 2437 def __init__(self, stat):
2425 2438 self.stat = stat
2426 2439
2427 2440 @classmethod
2428 2441 def frompath(cls, path):
2429 2442 try:
2430 2443 stat = os.stat(path)
2431 2444 except FileNotFoundError:
2432 2445 stat = None
2433 2446 return cls(stat)
2434 2447
2435 2448 @classmethod
2436 2449 def fromfp(cls, fp):
2437 2450 stat = os.fstat(fp.fileno())
2438 2451 return cls(stat)
2439 2452
2440 2453 __hash__ = object.__hash__
2441 2454
2442 2455 def __eq__(self, old):
2443 2456 try:
2444 2457 # if ambiguity between stat of new and old file is
2445 2458 # avoided, comparison of size, ctime and mtime is enough
2446 2459 # to exactly detect change of a file regardless of platform
2447 2460 return (
2448 2461 self.stat.st_size == old.stat.st_size
2449 2462 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2450 2463 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2451 2464 )
2452 2465 except AttributeError:
2453 2466 pass
2454 2467 try:
2455 2468 return self.stat is None and old.stat is None
2456 2469 except AttributeError:
2457 2470 return False
2458 2471
2459 2472 def isambig(self, old):
2460 2473 """Examine whether new (= self) stat is ambiguous against old one
2461 2474
2462 2475 "S[N]" below means stat of a file at N-th change:
2463 2476
2464 2477 - S[n-1].ctime < S[n].ctime: can detect change of a file
2465 2478 - S[n-1].ctime == S[n].ctime
2466 2479 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2467 2480 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2468 2481 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2469 2482 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2470 2483
2471 2484 Case (*2) above means that a file was changed twice or more at
2472 2485 same time in sec (= S[n-1].ctime), and comparison of timestamp
2473 2486 is ambiguous.
2474 2487
2475 2488 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2476 2489 timestamp is ambiguous".
2477 2490
2478 2491 But advancing mtime only in case (*2) doesn't work as
2479 2492 expected, because naturally advanced S[n].mtime in case (*1)
2480 2493 might be equal to manually advanced S[n-1 or earlier].mtime.
2481 2494
2482 2495 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2483 2496 treated as ambiguous regardless of mtime, to avoid changes being
2484 2497 overlooked due to collisions between such mtimes.
2485 2498
2486 2499 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2487 2500 S[n].mtime", even if size of a file isn't changed.
2488 2501 """
2489 2502 try:
2490 2503 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2491 2504 except AttributeError:
2492 2505 return False
2493 2506
2494 2507 def avoidambig(self, path, old):
2495 2508 """Change file stat of specified path to avoid ambiguity
2496 2509
2497 2510 'old' should be previous filestat of 'path'.
2498 2511
2499 2512 This skips avoiding ambiguity, if a process doesn't have
2500 2513 appropriate privileges for 'path'. This returns False in this
2501 2514 case.
2502 2515
2503 2516 Otherwise, this returns True, as "ambiguity is avoided".
2504 2517 """
2505 2518 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2506 2519 try:
2507 2520 os.utime(path, (advanced, advanced))
2508 2521 except PermissionError:
2509 2522 # utime() on the file created by another user causes EPERM,
2510 2523 # if a process doesn't have appropriate privileges
2511 2524 return False
2512 2525 return True
2513 2526
2514 2527 def __ne__(self, other):
2515 2528 return not self == other
2516 2529
2517 2530
2518 2531 class atomictempfile:
2519 2532 """writable file object that atomically updates a file
2520 2533
2521 2534 All writes will go to a temporary copy of the original file. Call
2522 2535 close() when you are done writing, and atomictempfile will rename
2523 2536 the temporary copy to the original name, making the changes
2524 2537 visible. If the object is destroyed without being closed, all your
2525 2538 writes are discarded.
2526 2539
2527 2540 checkambig argument of constructor is used with filestat, and is
2528 2541 useful only if target file is guarded by any lock (e.g. repo.lock
2529 2542 or repo.wlock).
2530 2543 """
2531 2544
2532 2545 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2533 2546 self.__name = name # permanent name
2534 2547 self._tempname = mktempcopy(
2535 2548 name,
2536 2549 emptyok=(b'w' in mode),
2537 2550 createmode=createmode,
2538 2551 enforcewritable=(b'w' in mode),
2539 2552 )
2540 2553
2541 2554 self._fp = posixfile(self._tempname, mode)
2542 2555 self._checkambig = checkambig
2543 2556
2544 2557 # delegated methods
2545 2558 self.read = self._fp.read
2546 2559 self.write = self._fp.write
2547 2560 self.writelines = self._fp.writelines
2548 2561 self.seek = self._fp.seek
2549 2562 self.tell = self._fp.tell
2550 2563 self.fileno = self._fp.fileno
2551 2564
2552 2565 def close(self):
2553 2566 if not self._fp.closed:
2554 2567 self._fp.close()
2555 2568 filename = localpath(self.__name)
2556 2569 oldstat = self._checkambig and filestat.frompath(filename)
2557 2570 if oldstat and oldstat.stat:
2558 2571 rename(self._tempname, filename)
2559 2572 newstat = filestat.frompath(filename)
2560 2573 if newstat.isambig(oldstat):
2561 2574 # stat of changed file is ambiguous to original one
2562 2575 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2563 2576 os.utime(filename, (advanced, advanced))
2564 2577 else:
2565 2578 rename(self._tempname, filename)
2566 2579
2567 2580 def discard(self):
2568 2581 if not self._fp.closed:
2569 2582 try:
2570 2583 os.unlink(self._tempname)
2571 2584 except OSError:
2572 2585 pass
2573 2586 self._fp.close()
2574 2587
2575 2588 def __del__(self):
2576 2589 if hasattr(self, '_fp'): # constructor actually did something
2577 2590 self.discard()
2578 2591
2579 2592 def __enter__(self):
2580 2593 return self
2581 2594
2582 2595 def __exit__(self, exctype, excvalue, traceback):
2583 2596 if exctype is not None:
2584 2597 self.discard()
2585 2598 else:
2586 2599 self.close()
2587 2600
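The intended pattern, sketched with an invented path: writes accumulate in the temporary copy, a clean exit renames it over the target, and an exception discards it, so readers never see a half-written file.

    with atomictempfile(b'/tmp/example.txt') as fp:
        fp.write(b'all or nothing\n')
    # on normal exit the temp file replaced b'/tmp/example.txt' atomically;
    # on an exception it was unlinked and the old contents survive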
2588 2601
2589 2602 def tryrmdir(f):
2590 2603 try:
2591 2604 removedirs(f)
2592 2605 except OSError as e:
2593 2606 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2594 2607 raise
2595 2608
2596 2609
2597 2610 def unlinkpath(
2598 2611 f: bytes, ignoremissing: bool = False, rmdir: bool = True
2599 2612 ) -> None:
2600 2613 """unlink and remove the directory if it is empty"""
2601 2614 if ignoremissing:
2602 2615 tryunlink(f)
2603 2616 else:
2604 2617 unlink(f)
2605 2618 if rmdir:
2606 2619 # try removing directories that might now be empty
2607 2620 try:
2608 2621 removedirs(os.path.dirname(f))
2609 2622 except OSError:
2610 2623 pass
2611 2624
2612 2625
2613 2626 def tryunlink(f: bytes) -> bool:
2614 2627 """Attempt to remove a file, ignoring FileNotFoundError.
2615 2628
2616 2629 Returns False if the file did not exist, True otherwise
2617 2630 """
2618 2631 try:
2619 2632 unlink(f)
2620 2633 return True
2621 2634 except FileNotFoundError:
2622 2635 return False
2623 2636
2624 2637
2625 2638 def makedirs(
2626 2639 name: bytes, mode: Optional[int] = None, notindexed: bool = False
2627 2640 ) -> None:
2628 2641 """recursive directory creation with parent mode inheritance
2629 2642
2630 2643 Newly created directories are marked as "not to be indexed by
2631 2644 the content indexing service", if ``notindexed`` is specified
2632 2645 for "write" mode access.
2633 2646 """
2634 2647 try:
2635 2648 makedir(name, notindexed)
2636 2649 except OSError as err:
2637 2650 if err.errno == errno.EEXIST:
2638 2651 return
2639 2652 if err.errno != errno.ENOENT or not name:
2640 2653 raise
2641 2654 parent = os.path.dirname(abspath(name))
2642 2655 if parent == name:
2643 2656 raise
2644 2657 makedirs(parent, mode, notindexed)
2645 2658 try:
2646 2659 makedir(name, notindexed)
2647 2660 except OSError as err:
2648 2661 # Catch EEXIST to handle races
2649 2662 if err.errno == errno.EEXIST:
2650 2663 return
2651 2664 raise
2652 2665 if mode is not None:
2653 2666 os.chmod(name, mode)
2654 2667
2655 2668
2656 2669 def readfile(path: bytes) -> bytes:
2657 2670 with open(path, b'rb') as fp:
2658 2671 return fp.read()
2659 2672
2660 2673
2661 2674 def writefile(path: bytes, text: bytes) -> None:
2662 2675 with open(path, b'wb') as fp:
2663 2676 fp.write(text)
2664 2677
2665 2678
2666 2679 def appendfile(path: bytes, text: bytes) -> None:
2667 2680 with open(path, b'ab') as fp:
2668 2681 fp.write(text)
2669 2682
2670 2683
2671 2684 class chunkbuffer:
2672 2685 """Allow arbitrary sized chunks of data to be efficiently read from an
2673 2686 iterator over chunks of arbitrary size."""
2674 2687
2675 2688 def __init__(self, in_iter):
2676 2689 """in_iter is the iterator that's iterating over the input chunks."""
2677 2690
2678 2691 def splitbig(chunks):
2679 2692 for chunk in chunks:
2680 2693 if len(chunk) > 2 ** 20:
2681 2694 pos = 0
2682 2695 while pos < len(chunk):
2683 2696 end = pos + 2 ** 18
2684 2697 yield chunk[pos:end]
2685 2698 pos = end
2686 2699 else:
2687 2700 yield chunk
2688 2701
2689 2702 self.iter = splitbig(in_iter)
2690 2703 self._queue = collections.deque()
2691 2704 self._chunkoffset = 0
2692 2705
2693 2706 def read(self, l=None):
2694 2707 """Read L bytes of data from the iterator of chunks of data.
2695 2708 Returns less than L bytes if the iterator runs dry.
2696 2709
2697 2710 If size parameter is omitted, read everything"""
2698 2711 if l is None:
2699 2712 return b''.join(self.iter)
2700 2713
2701 2714 left = l
2702 2715 buf = []
2703 2716 queue = self._queue
2704 2717 while left > 0:
2705 2718 # refill the queue
2706 2719 if not queue:
2707 2720 target = 2 ** 18
2708 2721 for chunk in self.iter:
2709 2722 queue.append(chunk)
2710 2723 target -= len(chunk)
2711 2724 if target <= 0:
2712 2725 break
2713 2726 if not queue:
2714 2727 break
2715 2728
2716 2729 # The easy way to do this would be to queue.popleft(), modify the
2717 2730 # chunk (if necessary), then queue.appendleft(). However, for cases
2718 2731 # where we read partial chunk content, this incurs 2 dequeue
2719 2732 # mutations and creates a new str for the remaining chunk in the
2720 2733 # queue. Our code below avoids this overhead.
2721 2734
2722 2735 chunk = queue[0]
2723 2736 chunkl = len(chunk)
2724 2737 offset = self._chunkoffset
2725 2738
2726 2739 # Use full chunk.
2727 2740 if offset == 0 and left >= chunkl:
2728 2741 left -= chunkl
2729 2742 queue.popleft()
2730 2743 buf.append(chunk)
2731 2744 # self._chunkoffset remains at 0.
2732 2745 continue
2733 2746
2734 2747 chunkremaining = chunkl - offset
2735 2748
2736 2749 # Use all of unconsumed part of chunk.
2737 2750 if left >= chunkremaining:
2738 2751 left -= chunkremaining
2739 2752 queue.popleft()
2740 2753 # offset == 0 is enabled by block above, so this won't merely
2741 2754 # copy via ``chunk[0:]``.
2742 2755 buf.append(chunk[offset:])
2743 2756 self._chunkoffset = 0
2744 2757
2745 2758 # Partial chunk needed.
2746 2759 else:
2747 2760 buf.append(chunk[offset : offset + left])
2748 2761 self._chunkoffset += left
2749 2762 left -= chunkremaining
2750 2763
2751 2764 return b''.join(buf)
2752 2765
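A sketch with made-up chunks: reads of an exact size are assembled across chunk boundaries, and a read past the end simply comes up short.

    cb = chunkbuffer(iter([b'abc', b'defg', b'h']))
    cb.read(2)    # -> b'ab'
    cb.read(4)    # -> b'cdef' (spans the first two chunks)
    cb.read(10)   # -> b'gh' (iterator ran dry, short read)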
2753 2766
2754 2767 def filechunkiter(f, size=131072, limit=None):
2755 2768 """Create a generator that produces the data in the file size
2756 2769 (default 131072) bytes at a time, up to optional limit (default is
2757 2770 to read all data). Chunks may be less than size bytes if the
2758 2771 chunk is the last chunk in the file, or the file is a socket or
2759 2772 some other type of file that sometimes reads less data than is
2760 2773 requested."""
2761 2774 assert size >= 0
2762 2775 assert limit is None or limit >= 0
2763 2776 while True:
2764 2777 if limit is None:
2765 2778 nbytes = size
2766 2779 else:
2767 2780 nbytes = min(limit, size)
2768 2781 s = nbytes and f.read(nbytes)
2769 2782 if not s:
2770 2783 break
2771 2784 if limit:
2772 2785 limit -= len(s)
2773 2786 yield s
2774 2787
2775 2788
2776 2789 class cappedreader:
2777 2790 """A file object proxy that allows reading up to N bytes.
2778 2791
2779 2792 Given a source file object, instances of this type allow reading up to
2780 2793 N bytes from that source file object. Attempts to read past the allowed
2781 2794 limit are treated as EOF.
2782 2795
2783 2796 It is assumed that I/O is not performed on the original file object
2784 2797 in addition to I/O that is performed by this instance. If there is,
2785 2798 state tracking will get out of sync and unexpected results will ensue.
2786 2799 """
2787 2800
2788 2801 def __init__(self, fh, limit):
2789 2802 """Allow reading up to <limit> bytes from <fh>."""
2790 2803 self._fh = fh
2791 2804 self._left = limit
2792 2805
2793 2806 def read(self, n=-1):
2794 2807 if not self._left:
2795 2808 return b''
2796 2809
2797 2810 if n < 0:
2798 2811 n = self._left
2799 2812
2800 2813 data = self._fh.read(min(n, self._left))
2801 2814 self._left -= len(data)
2802 2815 assert self._left >= 0
2803 2816
2804 2817 return data
2805 2818
2806 2819 def readinto(self, b):
2807 2820 res = self.read(len(b))
2808 2821 if res is None:
2809 2822 return None
2810 2823
2811 2824 b[0 : len(res)] = res
2812 2825 return len(res)
2813 2826
2814 2827
2815 2828 def unitcountfn(*unittable):
2816 2829 '''return a function that renders a readable count of some quantity'''
2817 2830
2818 2831 def go(count):
2819 2832 for multiplier, divisor, format in unittable:
2820 2833 if abs(count) >= divisor * multiplier:
2821 2834 return format % (count / float(divisor))
2822 2835 return unittable[-1][2] % count
2823 2836
2824 2837 return go
2825 2838
2826 2839
2827 2840 def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
2828 2841 """Check that linerange <fromline>:<toline> makes sense and return a
2829 2842 0-based range.
2830 2843
2831 2844 >>> processlinerange(10, 20)
2832 2845 (9, 20)
2833 2846 >>> processlinerange(2, 1)
2834 2847 Traceback (most recent call last):
2835 2848 ...
2836 2849 ParseError: line range must be positive
2837 2850 >>> processlinerange(0, 5)
2838 2851 Traceback (most recent call last):
2839 2852 ...
2840 2853 ParseError: fromline must be strictly positive
2841 2854 """
2842 2855 if toline - fromline < 0:
2843 2856 raise error.ParseError(_(b"line range must be positive"))
2844 2857 if fromline < 1:
2845 2858 raise error.ParseError(_(b"fromline must be strictly positive"))
2846 2859 return fromline - 1, toline
2847 2860
2848 2861
2849 2862 bytecount = unitcountfn(
2850 2863 (100, 1 << 30, _(b'%.0f GB')),
2851 2864 (10, 1 << 30, _(b'%.1f GB')),
2852 2865 (1, 1 << 30, _(b'%.2f GB')),
2853 2866 (100, 1 << 20, _(b'%.0f MB')),
2854 2867 (10, 1 << 20, _(b'%.1f MB')),
2855 2868 (1, 1 << 20, _(b'%.2f MB')),
2856 2869 (100, 1 << 10, _(b'%.0f KB')),
2857 2870 (10, 1 << 10, _(b'%.1f KB')),
2858 2871 (1, 1 << 10, _(b'%.2f KB')),
2859 2872 (1, 1, _(b'%.0f bytes')),
2860 2873 )
2861 2874
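The generated function walks the table top-down and renders with the first row whose threshold the value meets, for example:

    bytecount(512)              # -> b'512 bytes'
    bytecount(1 << 20)          # -> b'1.00 MB'
    bytecount(150 * (1 << 20))  # -> b'150 MB' (>= 100 MB, so no decimals)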
2862 2875
2863 2876 class transformingwriter:
2864 2877 """Writable file wrapper to transform data by function"""
2865 2878
2866 2879 def __init__(self, fp, encode):
2867 2880 self._fp = fp
2868 2881 self._encode = encode
2869 2882
2870 2883 def close(self):
2871 2884 self._fp.close()
2872 2885
2873 2886 def flush(self):
2874 2887 self._fp.flush()
2875 2888
2876 2889 def write(self, data):
2877 2890 return self._fp.write(self._encode(data))
2878 2891
2879 2892
2880 2893 # Matches a single EOL which can either be a CRLF where repeated CR
2881 2894 # are removed or a LF. We do not care about old Macintosh files, so a
2882 2895 # stray CR is an error.
2883 2896 _eolre = remod.compile(br'\r*\n')
2884 2897
2885 2898
2886 2899 def tolf(s: bytes) -> bytes:
2887 2900 return _eolre.sub(b'\n', s)
2888 2901
2889 2902
2890 2903 def tocrlf(s: bytes) -> bytes:
2891 2904 return _eolre.sub(b'\r\n', s)
2892 2905
2893 2906
2894 2907 def _crlfwriter(fp):
2895 2908 return transformingwriter(fp, tocrlf)
2896 2909
2897 2910
2898 2911 if pycompat.oslinesep == b'\r\n':
2899 2912 tonativeeol = tocrlf
2900 2913 fromnativeeol = tolf
2901 2914 nativeeolwriter = _crlfwriter
2902 2915 else:
2903 2916 tonativeeol = pycompat.identity
2904 2917 fromnativeeol = pycompat.identity
2905 2918 nativeeolwriter = pycompat.identity
2906 2919
2907 2920
2908 2921 # TODO delete since the workaround variant for Python 2 is no longer needed.
2909 2922 def iterfile(fp):
2910 2923 return fp
2911 2924
2912 2925
2913 2926 def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
2914 2927 for chunk in iterator:
2915 2928 for line in chunk.splitlines():
2916 2929 yield line
2917 2930
2918 2931
2919 2932 def expandpath(path: bytes) -> bytes:
2920 2933 return os.path.expanduser(os.path.expandvars(path))
2921 2934
2922 2935
2923 2936 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2924 2937 """Return the result of interpolating items in the mapping into string s.
2925 2938
2926 2939 prefix is a single character string, or a two character string with
2927 2940 a backslash as the first character if the prefix needs to be escaped in
2928 2941 a regular expression.
2929 2942
2930 2943 fn is an optional function that will be applied to the replacement text
2931 2944 just before replacement.
2932 2945
2933 2946 escape_prefix is an optional flag that allows using doubled prefix for
2934 2947 its escaping.
2935 2948 """
2936 2949 fn = fn or (lambda s: s)
2937 2950 patterns = b'|'.join(mapping.keys())
2938 2951 if escape_prefix:
2939 2952 patterns += b'|' + prefix
2940 2953 if len(prefix) > 1:
2941 2954 prefix_char = prefix[1:]
2942 2955 else:
2943 2956 prefix_char = prefix
2944 2957 mapping[prefix_char] = prefix_char
2945 2958 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2946 2959 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2947 2960
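A small sketch (mapping and string invented): each prefixed key found in the string is replaced by its mapped value, optionally passed through fn first.

    interpolate(b'%', {b'user': b'alice'}, b'hello %user')
    # -> b'hello alice'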
2948 2961
2949 2962 timecount = unitcountfn(
2950 2963 (1, 1e3, _(b'%.0f s')),
2951 2964 (100, 1, _(b'%.1f s')),
2952 2965 (10, 1, _(b'%.2f s')),
2953 2966 (1, 1, _(b'%.3f s')),
2954 2967 (100, 0.001, _(b'%.1f ms')),
2955 2968 (10, 0.001, _(b'%.2f ms')),
2956 2969 (1, 0.001, _(b'%.3f ms')),
2957 2970 (100, 0.000001, _(b'%.1f us')),
2958 2971 (10, 0.000001, _(b'%.2f us')),
2959 2972 (1, 0.000001, _(b'%.3f us')),
2960 2973 (100, 0.000000001, _(b'%.1f ns')),
2961 2974 (10, 0.000000001, _(b'%.2f ns')),
2962 2975 (1, 0.000000001, _(b'%.3f ns')),
2963 2976 )
2964 2977
2965 2978
2966 2979 @attr.s
2967 2980 class timedcmstats:
2968 2981 """Stats information produced by the timedcm context manager on entering."""
2969 2982
2970 2983 # the starting value of the timer as a float (meaning and resolution are
2971 2984 # platform dependent, see util.timer)
2972 2985 start = attr.ib(default=attr.Factory(lambda: timer()))
2973 2986 # the number of seconds as a floating point value; starts at 0, updated when
2974 2987 # the context is exited.
2975 2988 elapsed = attr.ib(default=0)
2976 2989 # the number of nested timedcm context managers.
2977 2990 level = attr.ib(default=1)
2978 2991
2979 2992 def __bytes__(self):
2980 2993 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2981 2994
2982 2995 __str__ = encoding.strmethod(__bytes__)
2983 2996
2984 2997
2985 2998 @contextlib.contextmanager
2986 2999 def timedcm(whencefmt, *whenceargs):
2987 3000 """A context manager that produces timing information for a given context.
2988 3001
2989 3002 On entering a timedcmstats instance is produced.
2990 3003
2991 3004 This context manager is reentrant.
2992 3005
2993 3006 """
2994 3007 # track nested context managers
2995 3008 timedcm._nested += 1
2996 3009 timing_stats = timedcmstats(level=timedcm._nested)
2997 3010 try:
2998 3011 with tracing.log(whencefmt, *whenceargs):
2999 3012 yield timing_stats
3000 3013 finally:
3001 3014 timing_stats.elapsed = timer() - timing_stats.start
3002 3015 timedcm._nested -= 1
3003 3016
3004 3017
3005 3018 timedcm._nested = 0
3006 3019
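A sketch of the context-manager form (the format string and its argument are arbitrary): the yielded stats object is created on entry and its elapsed field is filled in on exit, with level reflecting nesting depth.

    with timedcm(b'%s', b'example-block') as stats:
        sum(range(100000))
    # stats.elapsed now holds the duration in (fractional) seconds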
3007 3020
3008 3021 def timed(func):
3009 3022 """Report the execution time of a function call to stderr.
3010 3023
3011 3024 During development, use as a decorator when you need to measure
3012 3025 the cost of a function, e.g. as follows:
3013 3026
3014 3027 @util.timed
3015 3028 def foo(a, b, c):
3016 3029 pass
3017 3030 """
3018 3031
3019 3032 def wrapper(*args, **kwargs):
3020 3033 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3021 3034 result = func(*args, **kwargs)
3022 3035 stderr = procutil.stderr
3023 3036 stderr.write(
3024 3037 b'%s%s: %s\n'
3025 3038 % (
3026 3039 b' ' * time_stats.level * 2,
3027 3040 pycompat.bytestr(func.__name__),
3028 3041 time_stats,
3029 3042 )
3030 3043 )
3031 3044 return result
3032 3045
3033 3046 return wrapper
3034 3047
3035 3048
3036 3049 _sizeunits = (
3037 3050 (b'm', 2 ** 20),
3038 3051 (b'k', 2 ** 10),
3039 3052 (b'g', 2 ** 30),
3040 3053 (b'kb', 2 ** 10),
3041 3054 (b'mb', 2 ** 20),
3042 3055 (b'gb', 2 ** 30),
3043 3056 (b'b', 1),
3044 3057 )
3045 3058
3046 3059
3047 3060 def sizetoint(s: bytes) -> int:
3048 3061 """Convert a space specifier to a byte count.
3049 3062
3050 3063 >>> sizetoint(b'30')
3051 3064 30
3052 3065 >>> sizetoint(b'2.2kb')
3053 3066 2252
3054 3067 >>> sizetoint(b'6M')
3055 3068 6291456
3056 3069 """
3057 3070 t = s.strip().lower()
3058 3071 try:
3059 3072 for k, u in _sizeunits:
3060 3073 if t.endswith(k):
3061 3074 return int(float(t[: -len(k)]) * u)
3062 3075 return int(t)
3063 3076 except ValueError:
3064 3077 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3065 3078
3066 3079
3067 3080 class hooks:
3068 3081 """A collection of hook functions that can be used to extend a
3069 3082 function's behavior. Hooks are called in lexicographic order,
3070 3083 based on the names of their sources."""
3071 3084
3072 3085 def __init__(self):
3073 3086 self._hooks = []
3074 3087
3075 3088 def add(self, source, hook):
3076 3089 self._hooks.append((source, hook))
3077 3090
3078 3091 def __call__(self, *args):
3079 3092 self._hooks.sort(key=lambda x: x[0])
3080 3093 results = []
3081 3094 for source, hook in self._hooks:
3082 3095 results.append(hook(*args))
3083 3096 return results
3084 3097
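An illustrative sketch (sources and hooks invented): hooks run in lexicographic order of their source names, and the call returns the collected results.

    h = hooks()
    h.add(b'ext-b', lambda x: x + 1)
    h.add(b'ext-a', lambda x: x * 2)
    h(10)   # -> [20, 11]; b'ext-a' sorts before b'ext-b'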
3085 3098
3086 3099 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3087 3100 """Yields lines for a nicely formatted stacktrace.
3088 3101 Skips the 'skip' last entries, then returns the last 'depth' entries.
3089 3102 Each file+linenumber is formatted according to fileline.
3090 3103 Each line is formatted according to line.
3091 3104 If line is None, it yields:
3092 3105 length of longest filepath+line number,
3093 3106 filepath+linenumber,
3094 3107 function
3095 3108
3096 3109 Not to be used in production code, but very convenient while developing.
3097 3110 """
3098 3111 entries = [
3099 3112 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3100 3113 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3101 3114 ][-depth:]
3102 3115 if entries:
3103 3116 fnmax = max(len(entry[0]) for entry in entries)
3104 3117 for fnln, func in entries:
3105 3118 if line is None:
3106 3119 yield (fnmax, fnln, func)
3107 3120 else:
3108 3121 yield line % (fnmax, fnln, func)
3109 3122
3110 3123
3111 3124 def debugstacktrace(
3112 3125 msg=b'stacktrace',
3113 3126 skip=0,
3114 3127 f=procutil.stderr,
3115 3128 otherf=procutil.stdout,
3116 3129 depth=0,
3117 3130 prefix=b'',
3118 3131 ):
3119 3132 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3120 3133 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3121 3134 By default it will flush stdout first.
3122 3135 It can be used everywhere and intentionally does not require an ui object.
3123 3136 Not to be used in production code, but very convenient while developing.
3124 3137 """
3125 3138 if otherf:
3126 3139 otherf.flush()
3127 3140 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3128 3141 for line in getstackframes(skip + 1, depth=depth):
3129 3142 f.write(prefix + line)
3130 3143 f.flush()
3131 3144
3132 3145
3133 3146 # convenient shortcut
3134 3147 dst = debugstacktrace
3135 3148
3136 3149
3137 3150 def safename(f, tag, ctx, others=None):
3138 3151 """
3139 3152 Generate a name that is safe to rename f to in the given context.
3140 3153
3141 3154 f: filename to rename
3142 3155 tag: a string tag that will be included in the new name
3143 3156 ctx: a context, in which the new name must not exist
3144 3157 others: a set of other filenames that the new name must not be in
3145 3158
3146 3159 Returns a file name of the form oldname~tag[~number] which does not exist
3147 3160 in the provided context and is not in the set of other names.
3148 3161 """
3149 3162 if others is None:
3150 3163 others = set()
3151 3164
3152 3165 fn = b'%s~%s' % (f, tag)
3153 3166 if fn not in ctx and fn not in others:
3154 3167 return fn
3155 3168 for n in itertools.count(1):
3156 3169 fn = b'%s~%s~%s' % (f, tag, n)
3157 3170 if fn not in ctx and fn not in others:
3158 3171 return fn
3159 3172
3160 3173
3161 3174 def readexactly(stream, n):
3162 3175 '''read n bytes from stream.read and abort if less was available'''
3163 3176 s = stream.read(n)
3164 3177 if len(s) < n:
3165 3178 raise error.Abort(
3166 3179 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3167 3180 % (len(s), n)
3168 3181 )
3169 3182 return s
3170 3183
3171 3184
3172 3185 def uvarintencode(value):
3173 3186 """Encode an unsigned integer value to a varint.
3174 3187
3175 3188 A varint is a variable length integer of 1 or more bytes. Each byte
3176 3189 except the last has the most significant bit set. The lower 7 bits of
3177 3190 each byte store the 2's complement representation, least significant group
3178 3191 first.
3179 3192
3180 3193 >>> uvarintencode(0)
3181 3194 '\\x00'
3182 3195 >>> uvarintencode(1)
3183 3196 '\\x01'
3184 3197 >>> uvarintencode(127)
3185 3198 '\\x7f'
3186 3199 >>> uvarintencode(1337)
3187 3200 '\\xb9\\n'
3188 3201 >>> uvarintencode(65536)
3189 3202 '\\x80\\x80\\x04'
3190 3203 >>> uvarintencode(-1)
3191 3204 Traceback (most recent call last):
3192 3205 ...
3193 3206 ProgrammingError: negative value for uvarint: -1
3194 3207 """
3195 3208 if value < 0:
3196 3209 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3197 3210 bits = value & 0x7F
3198 3211 value >>= 7
3199 3212 bytes = []
3200 3213 while value:
3201 3214 bytes.append(pycompat.bytechr(0x80 | bits))
3202 3215 bits = value & 0x7F
3203 3216 value >>= 7
3204 3217 bytes.append(pycompat.bytechr(bits))
3205 3218
3206 3219 return b''.join(bytes)
3207 3220
3208 3221
3209 3222 def uvarintdecodestream(fh):
3210 3223 """Decode an unsigned variable length integer from a stream.
3211 3224
3212 3225 The passed argument is anything that has a ``.read(N)`` method.
3213 3226
3214 3227 >>> from io import BytesIO
3215 3228 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3216 3229 0
3217 3230 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3218 3231 1
3219 3232 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3220 3233 127
3221 3234 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3222 3235 1337
3223 3236 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3224 3237 65536
3225 3238 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3226 3239 Traceback (most recent call last):
3227 3240 ...
3228 3241 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3229 3242 """
3230 3243 result = 0
3231 3244 shift = 0
3232 3245 while True:
3233 3246 byte = ord(readexactly(fh, 1))
3234 3247 result |= (byte & 0x7F) << shift
3235 3248 if not (byte & 0x80):
3236 3249 return result
3237 3250 shift += 7
3238 3251
3239 3252
3240 3253 # Passing the '' locale means that the locale should be set according to the
3241 3254 # user settings (environment variables).
3242 3255 # Python sometimes avoids setting the global locale settings. When interfacing
3243 3256 # with C code (e.g. the curses module or the Subversion bindings), the global
3244 3257 # locale settings must be initialized correctly. Python 2 does not initialize
3245 3258 # the global locale settings on interpreter startup. Python 3 sometimes
3246 3259 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3247 3260 # explicitly initialize it to get consistent behavior if it's not already
3248 3261 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3249 3262 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3250 3263 # if we can remove this code.
3251 3264 @contextlib.contextmanager
3252 3265 def with_lc_ctype():
3253 3266 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3254 3267 if oldloc == 'C':
3255 3268 try:
3256 3269 try:
3257 3270 locale.setlocale(locale.LC_CTYPE, '')
3258 3271 except locale.Error:
3259 3272 # The likely case is that the locale from the environment
3260 3273 # variables is unknown.
3261 3274 pass
3262 3275 yield
3263 3276 finally:
3264 3277 locale.setlocale(locale.LC_CTYPE, oldloc)
3265 3278 else:
3266 3279 yield
3267 3280
3268 3281
3269 3282 def _estimatememory() -> Optional[int]:
3270 3283 """Provide an estimate for the available system memory in Bytes.
3271 3284
3272 3285 If no estimate can be provided on the platform, returns None.
3273 3286 """
3274 3287 if pycompat.sysplatform.startswith(b'win'):
3275 3288 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3276 3289 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3277 3290 from ctypes.wintypes import ( # pytype: disable=import-error
3278 3291 Structure,
3279 3292 byref,
3280 3293 sizeof,
3281 3294 windll,
3282 3295 )
3283 3296
3284 3297 class MEMORYSTATUSEX(Structure):
3285 3298 _fields_ = [
3286 3299 ('dwLength', DWORD),
3287 3300 ('dwMemoryLoad', DWORD),
3288 3301 ('ullTotalPhys', DWORDLONG),
3289 3302 ('ullAvailPhys', DWORDLONG),
3290 3303 ('ullTotalPageFile', DWORDLONG),
3291 3304 ('ullAvailPageFile', DWORDLONG),
3292 3305 ('ullTotalVirtual', DWORDLONG),
3293 3306 ('ullAvailVirtual', DWORDLONG),
3294 3307 ('ullExtendedVirtual', DWORDLONG),
3295 3308 ]
3296 3309
3297 3310 x = MEMORYSTATUSEX()
3298 3311 x.dwLength = sizeof(x)
3299 3312 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3300 3313 return x.ullAvailPhys
3301 3314
3302 3315 # On newer Unix-like systems and Mac OSX, the sysconf interface
3303 3316 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3304 3317 # seems to be implemented on most systems.
3305 3318 try:
3306 3319 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3307 3320 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3308 3321 return pagesize * pages
3309 3322 except OSError: # sysconf can fail
3310 3323 pass
3311 3324 except KeyError: # unknown parameter
3312 3325 pass