py3: catch PermissionError instead of checking errno == EPERM
Manuel Jacob
r50203:d2adebe3 default
@@ -1,3319 +1,3317 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16
17 17 import abc
18 18 import collections
19 19 import contextlib
20 20 import errno
21 21 import gc
22 22 import hashlib
23 23 import io
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import pickle # provides util.pickle symbol
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .node import hex
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 45 from hgdemandimport import tracing
46 46 from . import (
47 47 encoding,
48 48 error,
49 49 i18n,
50 50 policy,
51 51 pycompat,
52 52 urllibcompat,
53 53 )
54 54 from .utils import (
55 55 compression,
56 56 hashutil,
57 57 procutil,
58 58 stringutil,
59 59 )
60 60
61 61 if pycompat.TYPE_CHECKING:
62 62 from typing import (
63 63 Iterator,
64 64 List,
65 65 Optional,
66 66 Tuple,
67 67 )
68 68
69 69
70 70 base85 = policy.importmod('base85')
71 71 osutil = policy.importmod('osutil')
72 72
73 73 b85decode = base85.b85decode
74 74 b85encode = base85.b85encode
75 75
76 76 cookielib = pycompat.cookielib
77 77 httplib = pycompat.httplib
78 78 safehasattr = pycompat.safehasattr
79 79 socketserver = pycompat.socketserver
80 80 bytesio = io.BytesIO
81 81 # TODO deprecate stringio name, as it is a lie on Python 3.
82 82 stringio = bytesio
83 83 xmlrpclib = pycompat.xmlrpclib
84 84
85 85 httpserver = urllibcompat.httpserver
86 86 urlerr = urllibcompat.urlerr
87 87 urlreq = urllibcompat.urlreq
88 88
89 89 # workaround for win32mbcs
90 90 _filenamebytestr = pycompat.bytestr
91 91
92 92 if pycompat.iswindows:
93 93 from . import windows as platform
94 94 else:
95 95 from . import posix as platform
96 96
97 97 _ = i18n._
98 98
99 99 abspath = platform.abspath
100 100 bindunixsocket = platform.bindunixsocket
101 101 cachestat = platform.cachestat
102 102 checkexec = platform.checkexec
103 103 checklink = platform.checklink
104 104 copymode = platform.copymode
105 105 expandglobs = platform.expandglobs
106 106 getfsmountpoint = platform.getfsmountpoint
107 107 getfstype = platform.getfstype
108 108 get_password = platform.get_password
109 109 groupmembers = platform.groupmembers
110 110 groupname = platform.groupname
111 111 isexec = platform.isexec
112 112 isowner = platform.isowner
113 113 listdir = osutil.listdir
114 114 localpath = platform.localpath
115 115 lookupreg = platform.lookupreg
116 116 makedir = platform.makedir
117 117 nlinks = platform.nlinks
118 118 normpath = platform.normpath
119 119 normcase = platform.normcase
120 120 normcasespec = platform.normcasespec
121 121 normcasefallback = platform.normcasefallback
122 122 openhardlinks = platform.openhardlinks
123 123 oslink = platform.oslink
124 124 parsepatchoutput = platform.parsepatchoutput
125 125 pconvert = platform.pconvert
126 126 poll = platform.poll
127 127 posixfile = platform.posixfile
128 128 readlink = platform.readlink
129 129 rename = platform.rename
130 130 removedirs = platform.removedirs
131 131 samedevice = platform.samedevice
132 132 samefile = platform.samefile
133 133 samestat = platform.samestat
134 134 setflags = platform.setflags
135 135 split = platform.split
136 136 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
137 137 statisexec = platform.statisexec
138 138 statislink = platform.statislink
139 139 umask = platform.umask
140 140 unlink = platform.unlink
141 141 username = platform.username
142 142
143 143
144 144 def setumask(val):
145 145 # type: (int) -> None
146 146 '''updates the umask. used by chg server'''
147 147 if pycompat.iswindows:
148 148 return
149 149 os.umask(val)
150 150 global umask
151 151 platform.umask = umask = val & 0o777
152 152
153 153
154 154 # small compat layer
155 155 compengines = compression.compengines
156 156 SERVERROLE = compression.SERVERROLE
157 157 CLIENTROLE = compression.CLIENTROLE
158 158
159 159 # Python compatibility
160 160
161 161 _notset = object()
162 162
163 163
164 164 def bitsfrom(container):
165 165 bits = 0
166 166 for bit in container:
167 167 bits |= bit
168 168 return bits
169 169
170 170
171 171 # Python 2.6 still had deprecation warnings enabled by default. We do not
172 172 # want to display anything to standard users, so detect whether we are
173 173 # running tests and only use Python deprecation warnings in that case.
174 174 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
175 175 if _dowarn:
176 176 # explicitly unfilter our warning for python 2.7
177 177 #
178 178 # The option of setting PYTHONWARNINGS in the test runner was investigated.
179 179 # However, the module name set through PYTHONWARNINGS is matched exactly, so
180 180 # we cannot set 'mercurial' and have it also match e.g. 'mercurial.scmutil'.
181 181 # This makes the whole PYTHONWARNINGS approach useless for our use case.
182 182 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
183 183 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
184 184 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
185 185 if _dowarn:
186 186 # silence warning emitted by passing user string to re.sub()
187 187 warnings.filterwarnings(
188 188 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
189 189 )
190 190 warnings.filterwarnings(
191 191 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
192 192 )
193 193 # TODO: reinvent imp.is_frozen()
194 194 warnings.filterwarnings(
195 195 'ignore',
196 196 'the imp module is deprecated',
197 197 DeprecationWarning,
198 198 'mercurial',
199 199 )
200 200
201 201
202 202 def nouideprecwarn(msg, version, stacklevel=1):
203 203 """Issue an python native deprecation warning
204 204
205 205 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
206 206 """
207 207 if _dowarn:
208 208 msg += (
209 209 b"\n(compatibility will be dropped after Mercurial-%s,"
210 210 b" update your code.)"
211 211 ) % version
212 212 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
213 213 # on python 3 with chg, we will need to explicitly flush the output
214 214 sys.stderr.flush()
215 215
216 216
217 217 DIGESTS = {
218 218 b'md5': hashlib.md5,
219 219 b'sha1': hashutil.sha1,
220 220 b'sha512': hashlib.sha512,
221 221 }
222 222 # List of digest types from strongest to weakest
223 223 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
224 224
225 225 for k in DIGESTS_BY_STRENGTH:
226 226 assert k in DIGESTS
227 227
228 228
229 229 class digester:
230 230 """helper to compute digests.
231 231
232 232 This helper can be used to compute one or more digests given their name.
233 233
234 234 >>> d = digester([b'md5', b'sha1'])
235 235 >>> d.update(b'foo')
236 236 >>> [k for k in sorted(d)]
237 237 ['md5', 'sha1']
238 238 >>> d[b'md5']
239 239 'acbd18db4cc2f85cedef654fccc4a4d8'
240 240 >>> d[b'sha1']
241 241 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
242 242 >>> digester.preferred([b'md5', b'sha1'])
243 243 'sha1'
244 244 """
245 245
246 246 def __init__(self, digests, s=b''):
247 247 self._hashes = {}
248 248 for k in digests:
249 249 if k not in DIGESTS:
250 250 raise error.Abort(_(b'unknown digest type: %s') % k)
251 251 self._hashes[k] = DIGESTS[k]()
252 252 if s:
253 253 self.update(s)
254 254
255 255 def update(self, data):
256 256 for h in self._hashes.values():
257 257 h.update(data)
258 258
259 259 def __getitem__(self, key):
260 260 if key not in DIGESTS:
261 261 raise error.Abort(_(b'unknown digest type: %s') % key)
262 262 return hex(self._hashes[key].digest())
263 263
264 264 def __iter__(self):
265 265 return iter(self._hashes)
266 266
267 267 @staticmethod
268 268 def preferred(supported):
269 269 """returns the strongest digest type in both supported and DIGESTS."""
270 270
271 271 for k in DIGESTS_BY_STRENGTH:
272 272 if k in supported:
273 273 return k
274 274 return None
275 275
276 276
277 277 class digestchecker:
278 278 """file handle wrapper that additionally checks content against a given
279 279 size and digests.
280 280
281 281 d = digestchecker(fh, size, {'md5': '...'})
282 282
283 283 When multiple digests are given, all of them are validated.
284 284 """
285 285
286 286 def __init__(self, fh, size, digests):
287 287 self._fh = fh
288 288 self._size = size
289 289 self._got = 0
290 290 self._digests = dict(digests)
291 291 self._digester = digester(self._digests.keys())
292 292
293 293 def read(self, length=-1):
294 294 content = self._fh.read(length)
295 295 self._digester.update(content)
296 296 self._got += len(content)
297 297 return content
298 298
299 299 def validate(self):
300 300 if self._size != self._got:
301 301 raise error.Abort(
302 302 _(b'size mismatch: expected %d, got %d')
303 303 % (self._size, self._got)
304 304 )
305 305 for k, v in self._digests.items():
306 306 if v != self._digester[k]:
307 307 # i18n: first parameter is a digest name
308 308 raise error.Abort(
309 309 _(b'%s mismatch: expected %s, got %s')
310 310 % (k, v, self._digester[k])
311 311 )
312 312
313 313
314 314 try:
315 315 buffer = buffer # pytype: disable=name-error
316 316 except NameError:
317 317
318 318 def buffer(sliceable, offset=0, length=None):
319 319 if length is not None:
320 320 return memoryview(sliceable)[offset : offset + length]
321 321 return memoryview(sliceable)[offset:]
322 322
323 323
324 324 _chunksize = 4096
325 325
326 326
327 327 class bufferedinputpipe:
328 328 """a manually buffered input pipe
329 329
330 330 Python will not let us use buffered IO and lazy reading with 'polling' at
331 331 the same time. We cannot probe the buffer state and select will not detect
332 332 that data are ready to read if they are already buffered.
333 333
334 334 This class lets us work around that by implementing its own buffering
335 335 (allowing efficient readline) while offering a way to know whether the
336 336 buffer is empty (allowing the buffer to collaborate with polling).
337 337
338 338 This class lives in the 'util' module because it makes use of the 'os'
339 339 module from the python stdlib.
340 340 """
341 341
342 342 def __new__(cls, fh):
343 343 # If we receive a fileobjectproxy, we need to use a variation of this
344 344 # class that notifies observers about activity.
345 345 if isinstance(fh, fileobjectproxy):
346 346 cls = observedbufferedinputpipe
347 347
348 348 return super(bufferedinputpipe, cls).__new__(cls)
349 349
350 350 def __init__(self, input):
351 351 self._input = input
352 352 self._buffer = []
353 353 self._eof = False
354 354 self._lenbuf = 0
355 355
356 356 @property
357 357 def hasbuffer(self):
358 358 """True is any data is currently buffered
359 359
360 360 This will be used externally a pre-step for polling IO. If there is
361 361 already data then no polling should be set in place."""
362 362 return bool(self._buffer)
363 363
364 364 @property
365 365 def closed(self):
366 366 return self._input.closed
367 367
368 368 def fileno(self):
369 369 return self._input.fileno()
370 370
371 371 def close(self):
372 372 return self._input.close()
373 373
374 374 def read(self, size):
375 375 while (not self._eof) and (self._lenbuf < size):
376 376 self._fillbuffer()
377 377 return self._frombuffer(size)
378 378
379 379 def unbufferedread(self, size):
380 380 if not self._eof and self._lenbuf == 0:
381 381 self._fillbuffer(max(size, _chunksize))
382 382 return self._frombuffer(min(self._lenbuf, size))
383 383
384 384 def readline(self, *args, **kwargs):
385 385 if len(self._buffer) > 1:
386 386 # this should not happen because both read and readline end with a
387 387 # _frombuffer call that collapses it.
388 388 self._buffer = [b''.join(self._buffer)]
389 389 self._lenbuf = len(self._buffer[0])
390 390 lfi = -1
391 391 if self._buffer:
392 392 lfi = self._buffer[-1].find(b'\n')
393 393 while (not self._eof) and lfi < 0:
394 394 self._fillbuffer()
395 395 if self._buffer:
396 396 lfi = self._buffer[-1].find(b'\n')
397 397 size = lfi + 1
398 398 if lfi < 0: # end of file
399 399 size = self._lenbuf
400 400 elif len(self._buffer) > 1:
401 401 # we need to take previous chunks into account
402 402 size += self._lenbuf - len(self._buffer[-1])
403 403 return self._frombuffer(size)
404 404
405 405 def _frombuffer(self, size):
406 406 """return at most 'size' data from the buffer
407 407
408 408 The data are removed from the buffer."""
409 409 if size == 0 or not self._buffer:
410 410 return b''
411 411 buf = self._buffer[0]
412 412 if len(self._buffer) > 1:
413 413 buf = b''.join(self._buffer)
414 414
415 415 data = buf[:size]
416 416 buf = buf[len(data) :]
417 417 if buf:
418 418 self._buffer = [buf]
419 419 self._lenbuf = len(buf)
420 420 else:
421 421 self._buffer = []
422 422 self._lenbuf = 0
423 423 return data
424 424
425 425 def _fillbuffer(self, size=_chunksize):
426 426 """read data to the buffer"""
427 427 data = os.read(self._input.fileno(), size)
428 428 if not data:
429 429 self._eof = True
430 430 else:
431 431 self._lenbuf += len(data)
432 432 self._buffer.append(data)
433 433
434 434 return data
435 435
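# A minimal usage sketch (names are local to this example): wrap the read
# end of an OS pipe so readline() stays cheap while ``hasbuffer`` tells the
# event loop whether a select() call is needed at all.
#
#   import os, select
#
#   rfd, wfd = os.pipe()
#   os.write(wfd, b'one\ntwo\n')
#   pipe = bufferedinputpipe(os.fdopen(rfd, 'rb'))
#   assert pipe.readline() == b'one\n'
#   if not pipe.hasbuffer:  # data may already be buffered: skip polling
#       select.select([pipe.fileno()], [], [])
#   assert pipe.readline() == b'two\n'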
436 436
437 437 def mmapread(fp, size=None):
438 438 if size == 0:
439 439 # size of 0 to mmap.mmap() means "all data"
440 440 # rather than "zero bytes", so special case that.
441 441 return b''
442 442 elif size is None:
443 443 size = 0
444 444 fd = getattr(fp, 'fileno', lambda: fp)()
445 445 try:
446 446 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
447 447 except ValueError:
448 448 # Empty files cannot be mmapped, but mmapread should still work. Check
449 449 # if the file is empty, and if so, return an empty buffer.
450 450 if os.fstat(fd).st_size == 0:
451 451 return b''
452 452 raise
453 453
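# A small sketch of ``mmapread`` (``path`` is an arbitrary file name):
#
#   with open(path, 'wb') as fp:
#       fp.write(b'some revlog data')
#   with open(path, 'rb') as fp:
#       data = mmapread(fp)      # map the whole file
#       head = mmapread(fp, 4)   # map only the first 4 bytes
#   assert bytes(head) == b'some'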
454 454
455 455 class fileobjectproxy:
456 456 """A proxy around file objects that tells a watcher when events occur.
457 457
458 458 This type is intended to only be used for testing purposes. Think hard
459 459 before using it in important code.
460 460 """
461 461
462 462 __slots__ = (
463 463 '_orig',
464 464 '_observer',
465 465 )
466 466
467 467 def __init__(self, fh, observer):
468 468 object.__setattr__(self, '_orig', fh)
469 469 object.__setattr__(self, '_observer', observer)
470 470
471 471 def __getattribute__(self, name):
472 472 ours = {
473 473 '_observer',
474 474 # IOBase
475 475 'close',
476 476 # closed is a property
477 477 'fileno',
478 478 'flush',
479 479 'isatty',
480 480 'readable',
481 481 'readline',
482 482 'readlines',
483 483 'seek',
484 484 'seekable',
485 485 'tell',
486 486 'truncate',
487 487 'writable',
488 488 'writelines',
489 489 # RawIOBase
490 490 'read',
491 491 'readall',
492 492 'readinto',
493 493 'write',
494 494 # BufferedIOBase
495 495 # raw is a property
496 496 'detach',
497 497 # read defined above
498 498 'read1',
499 499 # readinto defined above
500 500 # write defined above
501 501 }
502 502
503 503 # We only observe some methods.
504 504 if name in ours:
505 505 return object.__getattribute__(self, name)
506 506
507 507 return getattr(object.__getattribute__(self, '_orig'), name)
508 508
509 509 def __nonzero__(self):
510 510 return bool(object.__getattribute__(self, '_orig'))
511 511
512 512 __bool__ = __nonzero__
513 513
514 514 def __delattr__(self, name):
515 515 return delattr(object.__getattribute__(self, '_orig'), name)
516 516
517 517 def __setattr__(self, name, value):
518 518 return setattr(object.__getattribute__(self, '_orig'), name, value)
519 519
520 520 def __iter__(self):
521 521 return object.__getattribute__(self, '_orig').__iter__()
522 522
523 523 def _observedcall(self, name, *args, **kwargs):
524 524 # Call the original object.
525 525 orig = object.__getattribute__(self, '_orig')
526 526 res = getattr(orig, name)(*args, **kwargs)
527 527
528 528 # Call a method on the observer of the same name with arguments
529 529 # so it can react, log, etc.
530 530 observer = object.__getattribute__(self, '_observer')
531 531 fn = getattr(observer, name, None)
532 532 if fn:
533 533 fn(res, *args, **kwargs)
534 534
535 535 return res
536 536
537 537 def close(self, *args, **kwargs):
538 538 return object.__getattribute__(self, '_observedcall')(
539 539 'close', *args, **kwargs
540 540 )
541 541
542 542 def fileno(self, *args, **kwargs):
543 543 return object.__getattribute__(self, '_observedcall')(
544 544 'fileno', *args, **kwargs
545 545 )
546 546
547 547 def flush(self, *args, **kwargs):
548 548 return object.__getattribute__(self, '_observedcall')(
549 549 'flush', *args, **kwargs
550 550 )
551 551
552 552 def isatty(self, *args, **kwargs):
553 553 return object.__getattribute__(self, '_observedcall')(
554 554 'isatty', *args, **kwargs
555 555 )
556 556
557 557 def readable(self, *args, **kwargs):
558 558 return object.__getattribute__(self, '_observedcall')(
559 559 'readable', *args, **kwargs
560 560 )
561 561
562 562 def readline(self, *args, **kwargs):
563 563 return object.__getattribute__(self, '_observedcall')(
564 564 'readline', *args, **kwargs
565 565 )
566 566
567 567 def readlines(self, *args, **kwargs):
568 568 return object.__getattribute__(self, '_observedcall')(
569 569 'readlines', *args, **kwargs
570 570 )
571 571
572 572 def seek(self, *args, **kwargs):
573 573 return object.__getattribute__(self, '_observedcall')(
574 574 'seek', *args, **kwargs
575 575 )
576 576
577 577 def seekable(self, *args, **kwargs):
578 578 return object.__getattribute__(self, '_observedcall')(
579 579 'seekable', *args, **kwargs
580 580 )
581 581
582 582 def tell(self, *args, **kwargs):
583 583 return object.__getattribute__(self, '_observedcall')(
584 584 'tell', *args, **kwargs
585 585 )
586 586
587 587 def truncate(self, *args, **kwargs):
588 588 return object.__getattribute__(self, '_observedcall')(
589 589 'truncate', *args, **kwargs
590 590 )
591 591
592 592 def writable(self, *args, **kwargs):
593 593 return object.__getattribute__(self, '_observedcall')(
594 594 'writable', *args, **kwargs
595 595 )
596 596
597 597 def writelines(self, *args, **kwargs):
598 598 return object.__getattribute__(self, '_observedcall')(
599 599 'writelines', *args, **kwargs
600 600 )
601 601
602 602 def read(self, *args, **kwargs):
603 603 return object.__getattribute__(self, '_observedcall')(
604 604 'read', *args, **kwargs
605 605 )
606 606
607 607 def readall(self, *args, **kwargs):
608 608 return object.__getattribute__(self, '_observedcall')(
609 609 'readall', *args, **kwargs
610 610 )
611 611
612 612 def readinto(self, *args, **kwargs):
613 613 return object.__getattribute__(self, '_observedcall')(
614 614 'readinto', *args, **kwargs
615 615 )
616 616
617 617 def write(self, *args, **kwargs):
618 618 return object.__getattribute__(self, '_observedcall')(
619 619 'write', *args, **kwargs
620 620 )
621 621
622 622 def detach(self, *args, **kwargs):
623 623 return object.__getattribute__(self, '_observedcall')(
624 624 'detach', *args, **kwargs
625 625 )
626 626
627 627 def read1(self, *args, **kwargs):
628 628 return object.__getattribute__(self, '_observedcall')(
629 629 'read1', *args, **kwargs
630 630 )
631 631
632 632
633 633 class observedbufferedinputpipe(bufferedinputpipe):
634 634 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
635 635
636 636 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
637 637 bypass ``fileobjectproxy``. Because of this, we need to make
638 638 ``bufferedinputpipe`` aware of these operations.
639 639
640 640 This variation of ``bufferedinputpipe`` can notify observers about
641 641 ``os.read()`` events. It also re-publishes other events, such as
642 642 ``read()`` and ``readline()``.
643 643 """
644 644
645 645 def _fillbuffer(self):
646 646 res = super(observedbufferedinputpipe, self)._fillbuffer()
647 647
648 648 fn = getattr(self._input._observer, 'osread', None)
649 649 if fn:
650 650 fn(res, _chunksize)
651 651
652 652 return res
653 653
654 654 # We use different observer methods because the operation isn't
655 655 # performed on the actual file object but on us.
656 656 def read(self, size):
657 657 res = super(observedbufferedinputpipe, self).read(size)
658 658
659 659 fn = getattr(self._input._observer, 'bufferedread', None)
660 660 if fn:
661 661 fn(res, size)
662 662
663 663 return res
664 664
665 665 def readline(self, *args, **kwargs):
666 666 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
667 667
668 668 fn = getattr(self._input._observer, 'bufferedreadline', None)
669 669 if fn:
670 670 fn(res)
671 671
672 672 return res
673 673
674 674
675 675 PROXIED_SOCKET_METHODS = {
676 676 'makefile',
677 677 'recv',
678 678 'recvfrom',
679 679 'recvfrom_into',
680 680 'recv_into',
681 681 'send',
682 682 'sendall',
683 683 'sendto',
684 684 'setblocking',
685 685 'settimeout',
686 686 'gettimeout',
687 687 'setsockopt',
688 688 }
689 689
690 690
691 691 class socketproxy:
692 692 """A proxy around a socket that tells a watcher when events occur.
693 693
694 694 This is like ``fileobjectproxy`` except for sockets.
695 695
696 696 This type is intended to only be used for testing purposes. Think hard
697 697 before using it in important code.
698 698 """
699 699
700 700 __slots__ = (
701 701 '_orig',
702 702 '_observer',
703 703 )
704 704
705 705 def __init__(self, sock, observer):
706 706 object.__setattr__(self, '_orig', sock)
707 707 object.__setattr__(self, '_observer', observer)
708 708
709 709 def __getattribute__(self, name):
710 710 if name in PROXIED_SOCKET_METHODS:
711 711 return object.__getattribute__(self, name)
712 712
713 713 return getattr(object.__getattribute__(self, '_orig'), name)
714 714
715 715 def __delattr__(self, name):
716 716 return delattr(object.__getattribute__(self, '_orig'), name)
717 717
718 718 def __setattr__(self, name, value):
719 719 return setattr(object.__getattribute__(self, '_orig'), name, value)
720 720
721 721 def __nonzero__(self):
722 722 return bool(object.__getattribute__(self, '_orig'))
723 723
724 724 __bool__ = __nonzero__
725 725
726 726 def _observedcall(self, name, *args, **kwargs):
727 727 # Call the original object.
728 728 orig = object.__getattribute__(self, '_orig')
729 729 res = getattr(orig, name)(*args, **kwargs)
730 730
731 731 # Call a method on the observer of the same name with arguments
732 732 # so it can react, log, etc.
733 733 observer = object.__getattribute__(self, '_observer')
734 734 fn = getattr(observer, name, None)
735 735 if fn:
736 736 fn(res, *args, **kwargs)
737 737
738 738 return res
739 739
740 740 def makefile(self, *args, **kwargs):
741 741 res = object.__getattribute__(self, '_observedcall')(
742 742 'makefile', *args, **kwargs
743 743 )
744 744
745 745 # The file object may be used for I/O. So we turn it into a
746 746 # proxy using our observer.
747 747 observer = object.__getattribute__(self, '_observer')
748 748 return makeloggingfileobject(
749 749 observer.fh,
750 750 res,
751 751 observer.name,
752 752 reads=observer.reads,
753 753 writes=observer.writes,
754 754 logdata=observer.logdata,
755 755 logdataapis=observer.logdataapis,
756 756 )
757 757
758 758 def recv(self, *args, **kwargs):
759 759 return object.__getattribute__(self, '_observedcall')(
760 760 'recv', *args, **kwargs
761 761 )
762 762
763 763 def recvfrom(self, *args, **kwargs):
764 764 return object.__getattribute__(self, '_observedcall')(
765 765 'recvfrom', *args, **kwargs
766 766 )
767 767
768 768 def recvfrom_into(self, *args, **kwargs):
769 769 return object.__getattribute__(self, '_observedcall')(
770 770 'recvfrom_into', *args, **kwargs
771 771 )
772 772
773 773 def recv_into(self, *args, **kwargs):
774 774 return object.__getattribute__(self, '_observedcall')(
775 775 'recv_into', *args, **kwargs
776 776 )
777 777
778 778 def send(self, *args, **kwargs):
779 779 return object.__getattribute__(self, '_observedcall')(
780 780 'send', *args, **kwargs
781 781 )
782 782
783 783 def sendall(self, *args, **kwargs):
784 784 return object.__getattribute__(self, '_observedcall')(
785 785 'sendall', *args, **kwargs
786 786 )
787 787
788 788 def sendto(self, *args, **kwargs):
789 789 return object.__getattribute__(self, '_observedcall')(
790 790 'sendto', *args, **kwargs
791 791 )
792 792
793 793 def setblocking(self, *args, **kwargs):
794 794 return object.__getattribute__(self, '_observedcall')(
795 795 'setblocking', *args, **kwargs
796 796 )
797 797
798 798 def settimeout(self, *args, **kwargs):
799 799 return object.__getattribute__(self, '_observedcall')(
800 800 'settimeout', *args, **kwargs
801 801 )
802 802
803 803 def gettimeout(self, *args, **kwargs):
804 804 return object.__getattribute__(self, '_observedcall')(
805 805 'gettimeout', *args, **kwargs
806 806 )
807 807
808 808 def setsockopt(self, *args, **kwargs):
809 809 return object.__getattribute__(self, '_observedcall')(
810 810 'setsockopt', *args, **kwargs
811 811 )
812 812
813 813
814 814 class baseproxyobserver:
815 815 def __init__(self, fh, name, logdata, logdataapis):
816 816 self.fh = fh
817 817 self.name = name
818 818 self.logdata = logdata
819 819 self.logdataapis = logdataapis
820 820
821 821 def _writedata(self, data):
822 822 if not self.logdata:
823 823 if self.logdataapis:
824 824 self.fh.write(b'\n')
825 825 self.fh.flush()
826 826 return
827 827
828 828 # Simple case writes all data on a single line.
829 829 if b'\n' not in data:
830 830 if self.logdataapis:
831 831 self.fh.write(b': %s\n' % stringutil.escapestr(data))
832 832 else:
833 833 self.fh.write(
834 834 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
835 835 )
836 836 self.fh.flush()
837 837 return
838 838
839 839 # Data with newlines is written to multiple lines.
840 840 if self.logdataapis:
841 841 self.fh.write(b':\n')
842 842
843 843 lines = data.splitlines(True)
844 844 for line in lines:
845 845 self.fh.write(
846 846 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
847 847 )
848 848 self.fh.flush()
849 849
850 850
851 851 class fileobjectobserver(baseproxyobserver):
852 852 """Logs file object activity."""
853 853
854 854 def __init__(
855 855 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
856 856 ):
857 857 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
858 858 self.reads = reads
859 859 self.writes = writes
860 860
861 861 def read(self, res, size=-1):
862 862 if not self.reads:
863 863 return
864 864 # Python 3 can return None from reads at EOF instead of empty strings.
865 865 if res is None:
866 866 res = b''
867 867
868 868 if size == -1 and res == b'':
869 869 # Suppress pointless read(-1) calls that return
870 870 # nothing. These happen _a lot_ on Python 3, and there
871 871 # doesn't seem to be a better workaround to have matching
872 872 # Python 2 and 3 behavior. :(
873 873 return
874 874
875 875 if self.logdataapis:
876 876 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
877 877
878 878 self._writedata(res)
879 879
880 880 def readline(self, res, limit=-1):
881 881 if not self.reads:
882 882 return
883 883
884 884 if self.logdataapis:
885 885 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
886 886
887 887 self._writedata(res)
888 888
889 889 def readinto(self, res, dest):
890 890 if not self.reads:
891 891 return
892 892
893 893 if self.logdataapis:
894 894 self.fh.write(
895 895 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
896 896 )
897 897
898 898 data = dest[0:res] if res is not None else b''
899 899
900 900 # _writedata() uses "in" operator and is confused by memoryview because
901 901 # characters are ints on Python 3.
902 902 if isinstance(data, memoryview):
903 903 data = data.tobytes()
904 904
905 905 self._writedata(data)
906 906
907 907 def write(self, res, data):
908 908 if not self.writes:
909 909 return
910 910
911 911 # Python 2 returns None from some write() calls. Python 3 (reasonably)
912 912 # returns the integer bytes written.
913 913 if res is None and data:
914 914 res = len(data)
915 915
916 916 if self.logdataapis:
917 917 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
918 918
919 919 self._writedata(data)
920 920
921 921 def flush(self, res):
922 922 if not self.writes:
923 923 return
924 924
925 925 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
926 926
927 927 # For observedbufferedinputpipe.
928 928 def bufferedread(self, res, size):
929 929 if not self.reads:
930 930 return
931 931
932 932 if self.logdataapis:
933 933 self.fh.write(
934 934 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
935 935 )
936 936
937 937 self._writedata(res)
938 938
939 939 def bufferedreadline(self, res):
940 940 if not self.reads:
941 941 return
942 942
943 943 if self.logdataapis:
944 944 self.fh.write(
945 945 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
946 946 )
947 947
948 948 self._writedata(res)
949 949
950 950
951 951 def makeloggingfileobject(
952 952 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
953 953 ):
954 954 """Turn a file object into a logging file object."""
955 955
956 956 observer = fileobjectobserver(
957 957 logh,
958 958 name,
959 959 reads=reads,
960 960 writes=writes,
961 961 logdata=logdata,
962 962 logdataapis=logdataapis,
963 963 )
964 964 return fileobjectproxy(fh, observer)
965 965
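# For illustration: observe all I/O on a BytesIO through the logging
# proxy. Each API call is mirrored to ``logh`` with the given name as a
# prefix, e.g. ``wrapped> write(5) -> 5: hello`` when logdata=True.
#
#   logh = io.BytesIO()
#   fh = makeloggingfileobject(logh, io.BytesIO(), b'wrapped', logdata=True)
#   fh.write(b'hello')
#   fh.seek(0)
#   fh.read()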
966 966
967 967 class socketobserver(baseproxyobserver):
968 968 """Logs socket activity."""
969 969
970 970 def __init__(
971 971 self,
972 972 fh,
973 973 name,
974 974 reads=True,
975 975 writes=True,
976 976 states=True,
977 977 logdata=False,
978 978 logdataapis=True,
979 979 ):
980 980 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
981 981 self.reads = reads
982 982 self.writes = writes
983 983 self.states = states
984 984
985 985 def makefile(self, res, mode=None, bufsize=None):
986 986 if not self.states:
987 987 return
988 988
989 989 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
990 990
991 991 def recv(self, res, size, flags=0):
992 992 if not self.reads:
993 993 return
994 994
995 995 if self.logdataapis:
996 996 self.fh.write(
997 997 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
998 998 )
999 999 self._writedata(res)
1000 1000
1001 1001 def recvfrom(self, res, size, flags=0):
1002 1002 if not self.reads:
1003 1003 return
1004 1004
1005 1005 if self.logdataapis:
1006 1006 self.fh.write(
1007 1007 b'%s> recvfrom(%d, %d) -> %d'
1008 1008 % (self.name, size, flags, len(res[0]))
1009 1009 )
1010 1010
1011 1011 self._writedata(res[0])
1012 1012
1013 1013 def recvfrom_into(self, res, buf, size, flags=0):
1014 1014 if not self.reads:
1015 1015 return
1016 1016
1017 1017 if self.logdataapis:
1018 1018 self.fh.write(
1019 1019 b'%s> recvfrom_into(%d, %d) -> %d'
1020 1020 % (self.name, size, flags, res[0])
1021 1021 )
1022 1022
1023 1023 self._writedata(buf[0 : res[0]])
1024 1024
1025 1025 def recv_into(self, res, buf, size=0, flags=0):
1026 1026 if not self.reads:
1027 1027 return
1028 1028
1029 1029 if self.logdataapis:
1030 1030 self.fh.write(
1031 1031 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1032 1032 )
1033 1033
1034 1034 self._writedata(buf[0:res])
1035 1035
1036 1036 def send(self, res, data, flags=0):
1037 1037 if not self.writes:
1038 1038 return
1039 1039
1040 1040 self.fh.write(
1041 1041 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1042 1042 )
1043 1043 self._writedata(data)
1044 1044
1045 1045 def sendall(self, res, data, flags=0):
1046 1046 if not self.writes:
1047 1047 return
1048 1048
1049 1049 if self.logdataapis:
1050 1050 # Returns None on success. So don't bother reporting return value.
1051 1051 self.fh.write(
1052 1052 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1053 1053 )
1054 1054
1055 1055 self._writedata(data)
1056 1056
1057 1057 def sendto(self, res, data, flagsoraddress, address=None):
1058 1058 if not self.writes:
1059 1059 return
1060 1060
1061 1061 if address:
1062 1062 flags = flagsoraddress
1063 1063 else:
1064 1064 flags = 0
1065 1065
1066 1066 if self.logdataapis:
1067 1067 self.fh.write(
1068 1068 b'%s> sendto(%d, %d, %r) -> %d'
1069 1069 % (self.name, len(data), flags, address, res)
1070 1070 )
1071 1071
1072 1072 self._writedata(data)
1073 1073
1074 1074 def setblocking(self, res, flag):
1075 1075 if not self.states:
1076 1076 return
1077 1077
1078 1078 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1079 1079
1080 1080 def settimeout(self, res, value):
1081 1081 if not self.states:
1082 1082 return
1083 1083
1084 1084 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1085 1085
1086 1086 def gettimeout(self, res):
1087 1087 if not self.states:
1088 1088 return
1089 1089
1090 1090 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1091 1091
1092 1092 def setsockopt(self, res, level, optname, value):
1093 1093 if not self.states:
1094 1094 return
1095 1095
1096 1096 self.fh.write(
1097 1097 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1098 1098 % (self.name, level, optname, value, res)
1099 1099 )
1100 1100
1101 1101
1102 1102 def makeloggingsocket(
1103 1103 logh,
1104 1104 fh,
1105 1105 name,
1106 1106 reads=True,
1107 1107 writes=True,
1108 1108 states=True,
1109 1109 logdata=False,
1110 1110 logdataapis=True,
1111 1111 ):
1112 1112 """Turn a socket into a logging socket."""
1113 1113
1114 1114 observer = socketobserver(
1115 1115 logh,
1116 1116 name,
1117 1117 reads=reads,
1118 1118 writes=writes,
1119 1119 states=states,
1120 1120 logdata=logdata,
1121 1121 logdataapis=logdataapis,
1122 1122 )
1123 1123 return socketproxy(fh, observer)
1124 1124
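# The same idea for sockets, sketched with a connected socket pair:
#
#   import socket
#   a, b = socket.socketpair()
#   logh = io.BytesIO()
#   sock = makeloggingsocket(logh, a, b'client', logdata=True)
#   sock.sendall(b'ping')   # logged as: client> sendall(4, 0): ping
#   assert b.recv(4) == b'ping'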
1125 1125
1126 1126 def version():
1127 1127 """Return version information if available."""
1128 1128 try:
1129 1129 from . import __version__
1130 1130
1131 1131 return __version__.version
1132 1132 except ImportError:
1133 1133 return b'unknown'
1134 1134
1135 1135
1136 1136 def versiontuple(v=None, n=4):
1137 1137 """Parses a Mercurial version string into an N-tuple.
1138 1138
1139 1139 The version string to be parsed is specified with the ``v`` argument.
1140 1140 If it isn't defined, the current Mercurial version string will be parsed.
1141 1141
1142 1142 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1143 1143 returned values:
1144 1144
1145 1145 >>> v = b'3.6.1+190-df9b73d2d444'
1146 1146 >>> versiontuple(v, 2)
1147 1147 (3, 6)
1148 1148 >>> versiontuple(v, 3)
1149 1149 (3, 6, 1)
1150 1150 >>> versiontuple(v, 4)
1151 1151 (3, 6, 1, '190-df9b73d2d444')
1152 1152
1153 1153 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1154 1154 (3, 6, 1, '190-df9b73d2d444+20151118')
1155 1155
1156 1156 >>> v = b'3.6'
1157 1157 >>> versiontuple(v, 2)
1158 1158 (3, 6)
1159 1159 >>> versiontuple(v, 3)
1160 1160 (3, 6, None)
1161 1161 >>> versiontuple(v, 4)
1162 1162 (3, 6, None, None)
1163 1163
1164 1164 >>> v = b'3.9-rc'
1165 1165 >>> versiontuple(v, 2)
1166 1166 (3, 9)
1167 1167 >>> versiontuple(v, 3)
1168 1168 (3, 9, None)
1169 1169 >>> versiontuple(v, 4)
1170 1170 (3, 9, None, 'rc')
1171 1171
1172 1172 >>> v = b'3.9-rc+2-02a8fea4289b'
1173 1173 >>> versiontuple(v, 2)
1174 1174 (3, 9)
1175 1175 >>> versiontuple(v, 3)
1176 1176 (3, 9, None)
1177 1177 >>> versiontuple(v, 4)
1178 1178 (3, 9, None, 'rc+2-02a8fea4289b')
1179 1179
1180 1180 >>> versiontuple(b'4.6rc0')
1181 1181 (4, 6, None, 'rc0')
1182 1182 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1183 1183 (4, 6, None, 'rc0+12-425d55e54f98')
1184 1184 >>> versiontuple(b'.1.2.3')
1185 1185 (None, None, None, '.1.2.3')
1186 1186 >>> versiontuple(b'12.34..5')
1187 1187 (12, 34, None, '..5')
1188 1188 >>> versiontuple(b'1.2.3.4.5.6')
1189 1189 (1, 2, 3, '.4.5.6')
1190 1190 """
1191 1191 if not v:
1192 1192 v = version()
1193 1193 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1194 1194 if not m:
1195 1195 vparts, extra = b'', v
1196 1196 elif m.group(2):
1197 1197 vparts, extra = m.groups()
1198 1198 else:
1199 1199 vparts, extra = m.group(1), None
1200 1200
1201 1201 assert vparts is not None # help pytype
1202 1202
1203 1203 vints = []
1204 1204 for i in vparts.split(b'.'):
1205 1205 try:
1206 1206 vints.append(int(i))
1207 1207 except ValueError:
1208 1208 break
1209 1209 # (3, 6) -> (3, 6, None)
1210 1210 while len(vints) < 3:
1211 1211 vints.append(None)
1212 1212
1213 1213 if n == 2:
1214 1214 return (vints[0], vints[1])
1215 1215 if n == 3:
1216 1216 return (vints[0], vints[1], vints[2])
1217 1217 if n == 4:
1218 1218 return (vints[0], vints[1], vints[2], extra)
1219 1219
1220 1220 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1221 1221
1222 1222
1223 1223 def cachefunc(func):
1224 1224 '''cache the result of function calls'''
1225 1225 # XXX doesn't handle keyword args
1226 1226 if func.__code__.co_argcount == 0:
1227 1227 listcache = []
1228 1228
1229 1229 def f():
1230 1230 if len(listcache) == 0:
1231 1231 listcache.append(func())
1232 1232 return listcache[0]
1233 1233
1234 1234 return f
1235 1235 cache = {}
1236 1236 if func.__code__.co_argcount == 1:
1237 1237 # we gain a small amount of time because
1238 1238 # we don't need to pack/unpack the list
1239 1239 def f(arg):
1240 1240 if arg not in cache:
1241 1241 cache[arg] = func(arg)
1242 1242 return cache[arg]
1243 1243
1244 1244 else:
1245 1245
1246 1246 def f(*args):
1247 1247 if args not in cache:
1248 1248 cache[args] = func(*args)
1249 1249 return cache[args]
1250 1250
1251 1251 return f
1252 1252
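# A small sketch of ``cachefunc``: only positional arguments are
# supported, and results are kept forever (no eviction).
#
#   calls = []
#
#   @cachefunc
#   def double(x):
#       calls.append(x)
#       return x * 2
#
#   assert double(2) == double(2) == 4
#   assert calls == [2]  # the second call was served from the cache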
1253 1253
1254 1254 class cow:
1255 1255 """helper class to make copy-on-write easier
1256 1256
1257 1257 Call preparewrite before doing any writes.
1258 1258 """
1259 1259
1260 1260 def preparewrite(self):
1261 1261 """call this before writes, return self or a copied new object"""
1262 1262 if getattr(self, '_copied', 0):
1263 1263 self._copied -= 1
1264 1264 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1265 1265 return self.__class__(self) # pytype: disable=wrong-arg-count
1266 1266 return self
1267 1267
1268 1268 def copy(self):
1269 1269 """always do a cheap copy"""
1270 1270 self._copied = getattr(self, '_copied', 0) + 1
1271 1271 return self
1272 1272
1273 1273
1274 1274 class sortdict(collections.OrderedDict):
1275 1275 """a simple sorted dictionary
1276 1276
1277 1277 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1278 1278 >>> d2 = d1.copy()
1279 1279 >>> d2
1280 1280 sortdict([('a', 0), ('b', 1)])
1281 1281 >>> d2.update([(b'a', 2)])
1282 1282 >>> list(d2.keys()) # should still be in last-set order
1283 1283 ['b', 'a']
1284 1284 >>> d1.insert(1, b'a.5', 0.5)
1285 1285 >>> d1
1286 1286 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1287 1287 """
1288 1288
1289 1289 def __setitem__(self, key, value):
1290 1290 if key in self:
1291 1291 del self[key]
1292 1292 super(sortdict, self).__setitem__(key, value)
1293 1293
1294 1294 if pycompat.ispypy:
1295 1295 # __setitem__() isn't called as of PyPy 5.8.0
1296 1296 def update(self, src, **f):
1297 1297 if isinstance(src, dict):
1298 1298 src = src.items()
1299 1299 for k, v in src:
1300 1300 self[k] = v
1301 1301 for k in f:
1302 1302 self[k] = f[k]
1303 1303
1304 1304 def insert(self, position, key, value):
1305 1305 for (i, (k, v)) in enumerate(list(self.items())):
1306 1306 if i == position:
1307 1307 self[key] = value
1308 1308 if i >= position:
1309 1309 del self[k]
1310 1310 self[k] = v
1311 1311
1312 1312
1313 1313 class cowdict(cow, dict):
1314 1314 """copy-on-write dict
1315 1315
1316 1316 Be sure to call d = d.preparewrite() before writing to d.
1317 1317
1318 1318 >>> a = cowdict()
1319 1319 >>> a is a.preparewrite()
1320 1320 True
1321 1321 >>> b = a.copy()
1322 1322 >>> b is a
1323 1323 True
1324 1324 >>> c = b.copy()
1325 1325 >>> c is a
1326 1326 True
1327 1327 >>> a = a.preparewrite()
1328 1328 >>> b is a
1329 1329 False
1330 1330 >>> a is a.preparewrite()
1331 1331 True
1332 1332 >>> c = c.preparewrite()
1333 1333 >>> b is c
1334 1334 False
1335 1335 >>> b is b.preparewrite()
1336 1336 True
1337 1337 """
1338 1338
1339 1339
1340 1340 class cowsortdict(cow, sortdict):
1341 1341 """copy-on-write sortdict
1342 1342
1343 1343 Be sure to call d = d.preparewrite() before writing to d.
1344 1344 """
1345 1345
1346 1346
1347 1347 class transactional: # pytype: disable=ignored-metaclass
1348 1348 """Base class for making a transactional type into a context manager."""
1349 1349
1350 1350 __metaclass__ = abc.ABCMeta
1351 1351
1352 1352 @abc.abstractmethod
1353 1353 def close(self):
1354 1354 """Successfully closes the transaction."""
1355 1355
1356 1356 @abc.abstractmethod
1357 1357 def release(self):
1358 1358 """Marks the end of the transaction.
1359 1359
1360 1360 If the transaction has not been closed, it will be aborted.
1361 1361 """
1362 1362
1363 1363 def __enter__(self):
1364 1364 return self
1365 1365
1366 1366 def __exit__(self, exc_type, exc_val, exc_tb):
1367 1367 try:
1368 1368 if exc_type is None:
1369 1369 self.close()
1370 1370 finally:
1371 1371 self.release()
1372 1372
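# A minimal ``transactional`` subclass for illustration (the class name is
# made up): close() runs only on success, release() always runs.
#
#   class demotxn(transactional):
#       def __init__(self):
#           self.closed = self.released = False
#       def close(self):
#           self.closed = True
#       def release(self):
#           self.released = True
#
#   with demotxn() as tr:
#       pass
#   assert tr.closed and tr.released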
1373 1373
1374 1374 @contextlib.contextmanager
1375 1375 def acceptintervention(tr=None):
1376 1376 """A context manager that closes the transaction on InterventionRequired
1377 1377
1378 1378 If no transaction was provided, this simply runs the body and returns
1379 1379 """
1380 1380 if not tr:
1381 1381 yield
1382 1382 return
1383 1383 try:
1384 1384 yield
1385 1385 tr.close()
1386 1386 except error.InterventionRequired:
1387 1387 tr.close()
1388 1388 raise
1389 1389 finally:
1390 1390 tr.release()
1391 1391
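# Using the demotxn sketch above: the transaction is closed rather than
# aborted when InterventionRequired escapes the body, so already-recorded
# work survives for the user to continue from.
#
#   tr = demotxn()
#   try:
#       with acceptintervention(tr):
#           raise error.InterventionRequired(b'resolve conflicts first')
#   except error.InterventionRequired:
#       pass
#   assert tr.closed and tr.released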
1392 1392
1393 1393 @contextlib.contextmanager
1394 1394 def nullcontextmanager(enter_result=None):
1395 1395 yield enter_result
1396 1396
1397 1397
1398 1398 class _lrucachenode:
1399 1399 """A node in a doubly linked list.
1400 1400
1401 1401 Holds a reference to nodes on either side as well as a key-value
1402 1402 pair for the dictionary entry.
1403 1403 """
1404 1404
1405 1405 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1406 1406
1407 1407 def __init__(self):
1408 1408 self.next = self
1409 1409 self.prev = self
1410 1410
1411 1411 self.key = _notset
1412 1412 self.value = None
1413 1413 self.cost = 0
1414 1414
1415 1415 def markempty(self):
1416 1416 """Mark the node as emptied."""
1417 1417 self.key = _notset
1418 1418 self.value = None
1419 1419 self.cost = 0
1420 1420
1421 1421
1422 1422 class lrucachedict:
1423 1423 """Dict that caches most recent accesses and sets.
1424 1424
1425 1425 The dict consists of an actual backing dict - indexed by original
1426 1426 key - and a doubly linked circular list defining the order of entries in
1427 1427 the cache.
1428 1428
1429 1429 The head node is the newest entry in the cache. If the cache is full,
1430 1430 we recycle head.prev and make it the new head. Cache accesses result in
1431 1431 the node being moved to before the existing head and being marked as the
1432 1432 new head node.
1433 1433
1434 1434 Items in the cache can be inserted with an optional "cost" value. This is
1435 1435 simply an integer that is specified by the caller. The cache can be queried
1436 1436 for the total cost of all items presently in the cache.
1437 1437
1438 1438 The cache can also define a maximum cost. If a cache insertion would
1439 1439 cause the total cost of the cache to go beyond the maximum cost limit,
1440 1440 nodes will be evicted to make room for the new node. This can be used
1441 1441 to e.g. set a max memory limit and associate an estimated bytes size
1442 1442 cost to each item in the cache. By default, no maximum cost is enforced.
1443 1443 """
1444 1444
1445 1445 def __init__(self, max, maxcost=0):
1446 1446 self._cache = {}
1447 1447
1448 1448 self._head = _lrucachenode()
1449 1449 self._size = 1
1450 1450 self.capacity = max
1451 1451 self.totalcost = 0
1452 1452 self.maxcost = maxcost
1453 1453
1454 1454 def __len__(self):
1455 1455 return len(self._cache)
1456 1456
1457 1457 def __contains__(self, k):
1458 1458 return k in self._cache
1459 1459
1460 1460 def __iter__(self):
1461 1461 # We don't have to iterate in cache order, but why not.
1462 1462 n = self._head
1463 1463 for i in range(len(self._cache)):
1464 1464 yield n.key
1465 1465 n = n.next
1466 1466
1467 1467 def __getitem__(self, k):
1468 1468 node = self._cache[k]
1469 1469 self._movetohead(node)
1470 1470 return node.value
1471 1471
1472 1472 def insert(self, k, v, cost=0):
1473 1473 """Insert a new item in the cache with optional cost value."""
1474 1474 node = self._cache.get(k)
1475 1475 # Replace existing value and mark as newest.
1476 1476 if node is not None:
1477 1477 self.totalcost -= node.cost
1478 1478 node.value = v
1479 1479 node.cost = cost
1480 1480 self.totalcost += cost
1481 1481 self._movetohead(node)
1482 1482
1483 1483 if self.maxcost:
1484 1484 self._enforcecostlimit()
1485 1485
1486 1486 return
1487 1487
1488 1488 if self._size < self.capacity:
1489 1489 node = self._addcapacity()
1490 1490 else:
1491 1491 # Grab the last/oldest item.
1492 1492 node = self._head.prev
1493 1493
1494 1494 # At capacity. Kill the old entry.
1495 1495 if node.key is not _notset:
1496 1496 self.totalcost -= node.cost
1497 1497 del self._cache[node.key]
1498 1498
1499 1499 node.key = k
1500 1500 node.value = v
1501 1501 node.cost = cost
1502 1502 self.totalcost += cost
1503 1503 self._cache[k] = node
1504 1504 # And mark it as newest entry. No need to adjust order since it
1505 1505 # is already self._head.prev.
1506 1506 self._head = node
1507 1507
1508 1508 if self.maxcost:
1509 1509 self._enforcecostlimit()
1510 1510
1511 1511 def __setitem__(self, k, v):
1512 1512 self.insert(k, v)
1513 1513
1514 1514 def __delitem__(self, k):
1515 1515 self.pop(k)
1516 1516
1517 1517 def pop(self, k, default=_notset):
1518 1518 try:
1519 1519 node = self._cache.pop(k)
1520 1520 except KeyError:
1521 1521 if default is _notset:
1522 1522 raise
1523 1523 return default
1524 1524
1525 1525 assert node is not None # help pytype
1526 1526 value = node.value
1527 1527 self.totalcost -= node.cost
1528 1528 node.markempty()
1529 1529
1530 1530 # Temporarily mark as newest item before re-adjusting head to make
1531 1531 # this node the oldest item.
1532 1532 self._movetohead(node)
1533 1533 self._head = node.next
1534 1534
1535 1535 return value
1536 1536
1537 1537 # Additional dict methods.
1538 1538
1539 1539 def get(self, k, default=None):
1540 1540 try:
1541 1541 return self.__getitem__(k)
1542 1542 except KeyError:
1543 1543 return default
1544 1544
1545 1545 def peek(self, k, default=_notset):
1546 1546 """Get the specified item without moving it to the head
1547 1547
1548 1548 Unlike get(), this doesn't mutate the internal state. But be aware
1549 1549 that it doesn't mean peek() is thread safe.
1550 1550 """
1551 1551 try:
1552 1552 node = self._cache[k]
1553 1553 assert node is not None # help pytype
1554 1554 return node.value
1555 1555 except KeyError:
1556 1556 if default is _notset:
1557 1557 raise
1558 1558 return default
1559 1559
1560 1560 def clear(self):
1561 1561 n = self._head
1562 1562 while n.key is not _notset:
1563 1563 self.totalcost -= n.cost
1564 1564 n.markempty()
1565 1565 n = n.next
1566 1566
1567 1567 self._cache.clear()
1568 1568
1569 1569 def copy(self, capacity=None, maxcost=0):
1570 1570 """Create a new cache as a copy of the current one.
1571 1571
1572 1572 By default, the new cache has the same capacity as the existing one.
1573 1573 But, the cache capacity can be changed as part of performing the
1574 1574 copy.
1575 1575
1576 1576 Items in the copy have an insertion/access order matching this
1577 1577 instance.
1578 1578 """
1579 1579
1580 1580 capacity = capacity or self.capacity
1581 1581 maxcost = maxcost or self.maxcost
1582 1582 result = lrucachedict(capacity, maxcost=maxcost)
1583 1583
1584 1584 # We copy entries by iterating in oldest-to-newest order so the copy
1585 1585 # has the correct ordering.
1586 1586
1587 1587 # Find the first non-empty entry.
1588 1588 n = self._head.prev
1589 1589 while n.key is _notset and n is not self._head:
1590 1590 n = n.prev
1591 1591
1592 1592 # We could potentially skip the first N items when decreasing capacity.
1593 1593 # But let's keep it simple unless it is a performance problem.
1594 1594 for i in range(len(self._cache)):
1595 1595 result.insert(n.key, n.value, cost=n.cost)
1596 1596 n = n.prev
1597 1597
1598 1598 return result
1599 1599
1600 1600 def popoldest(self):
1601 1601 """Remove the oldest item from the cache.
1602 1602
1603 1603 Returns the (key, value) describing the removed cache entry.
1604 1604 """
1605 1605 if not self._cache:
1606 1606 return
1607 1607
1608 1608 # Walk the linked list backwards starting at tail node until we hit
1609 1609 # a non-empty node.
1610 1610 n = self._head.prev
1611 1611
1612 1612 assert n is not None # help pytype
1613 1613
1614 1614 while n.key is _notset:
1615 1615 n = n.prev
1616 1616
1617 1617 assert n is not None # help pytype
1618 1618
1619 1619 key, value = n.key, n.value
1620 1620
1621 1621 # And remove it from the cache and mark it as empty.
1622 1622 del self._cache[n.key]
1623 1623 self.totalcost -= n.cost
1624 1624 n.markempty()
1625 1625
1626 1626 return key, value
1627 1627
1628 1628 def _movetohead(self, node):
1629 1629 """Mark a node as the newest, making it the new head.
1630 1630
1631 1631 When a node is accessed, it becomes the freshest entry in the LRU
1632 1632 list, which is denoted by self._head.
1633 1633
1634 1634 Visually, let's make ``N`` the new head node (* denotes head):
1635 1635
1636 1636 previous/oldest <-> head <-> next/next newest
1637 1637
1638 1638 ----<->--- A* ---<->-----
1639 1639 | |
1640 1640 E <-> D <-> N <-> C <-> B
1641 1641
1642 1642 To:
1643 1643
1644 1644 ----<->--- N* ---<->-----
1645 1645 | |
1646 1646 E <-> D <-> C <-> B <-> A
1647 1647
1648 1648 This requires the following moves:
1649 1649
1650 1650 C.next = D (node.prev.next = node.next)
1651 1651 D.prev = C (node.next.prev = node.prev)
1652 1652 E.next = N (head.prev.next = node)
1653 1653 N.prev = E (node.prev = head.prev)
1654 1654 N.next = A (node.next = head)
1655 1655 A.prev = N (head.prev = node)
1656 1656 """
1657 1657 head = self._head
1658 1658 # C.next = D
1659 1659 node.prev.next = node.next
1660 1660 # D.prev = C
1661 1661 node.next.prev = node.prev
1662 1662 # N.prev = E
1663 1663 node.prev = head.prev
1664 1664 # N.next = A
1665 1665 # It is tempting to do just "head" here, however if node is
1666 1666 # adjacent to head, this will do bad things.
1667 1667 node.next = head.prev.next
1668 1668 # E.next = N
1669 1669 node.next.prev = node
1670 1670 # A.prev = N
1671 1671 node.prev.next = node
1672 1672
1673 1673 self._head = node
1674 1674
1675 1675 def _addcapacity(self):
1676 1676 """Add a node to the circular linked list.
1677 1677
1678 1678 The new node is inserted before the head node.
1679 1679 """
1680 1680 head = self._head
1681 1681 node = _lrucachenode()
1682 1682 head.prev.next = node
1683 1683 node.prev = head.prev
1684 1684 node.next = head
1685 1685 head.prev = node
1686 1686 self._size += 1
1687 1687 return node
1688 1688
1689 1689 def _enforcecostlimit(self):
1690 1690 # This should run after an insertion. It should only be called if total
1691 1691 # cost limits are being enforced.
1692 1692 # The most recently inserted node is never evicted.
1693 1693 if len(self) <= 1 or self.totalcost <= self.maxcost:
1694 1694 return
1695 1695
1696 1696 # This is logically equivalent to calling popoldest() until we
1697 1697 # free up enough cost. We don't do that since popoldest() needs
1698 1698 # to walk the linked list and doing this in a loop would be
1699 1699 # quadratic. So we find the first non-empty node and then
1700 1700 # walk nodes until we free up enough capacity.
1701 1701 #
1702 1702 # If we only removed the minimum number of nodes to free enough
1703 1703 # cost at insert time, chances are high that the next insert would
1704 1704 # also require pruning. This would effectively constitute quadratic
1705 1705 # behavior for insert-heavy workloads. To mitigate this, we set a
1706 1706 # target cost that is a percentage of the max cost. This will tend
1707 1707 # to free more nodes when the high water mark is reached, which
1708 1708 # lowers the chances of needing to prune on the subsequent insert.
1709 1709 targetcost = int(self.maxcost * 0.75)
1710 1710
1711 1711 n = self._head.prev
1712 1712 while n.key is _notset:
1713 1713 n = n.prev
1714 1714
1715 1715 while len(self) > 1 and self.totalcost > targetcost:
1716 1716 del self._cache[n.key]
1717 1717 self.totalcost -= n.cost
1718 1718 n.markempty()
1719 1719 n = n.prev
1720 1720
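# A sketch of LRU eviction order in ``lrucachedict``:
#
#   d = lrucachedict(2)
#   d[b'a'] = 1
#   d[b'b'] = 2
#   d[b'a']        # touch 'a' so 'b' becomes the oldest entry
#   d[b'c'] = 3    # at capacity: recycles the node holding 'b'
#   assert b'b' not in d and b'a' in d and b'c' in d
#
# With maxcost set, insert(k, v, cost=...) additionally evicts old nodes
# until the total cost drops to the 75% target (the most recently inserted
# entry is never evicted).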
1721 1721
1722 1722 def lrucachefunc(func):
1723 1723 '''cache most recent results of function calls'''
1724 1724 cache = {}
1725 1725 order = collections.deque()
1726 1726 if func.__code__.co_argcount == 1:
1727 1727
1728 1728 def f(arg):
1729 1729 if arg not in cache:
1730 1730 if len(cache) > 20:
1731 1731 del cache[order.popleft()]
1732 1732 cache[arg] = func(arg)
1733 1733 else:
1734 1734 order.remove(arg)
1735 1735 order.append(arg)
1736 1736 return cache[arg]
1737 1737
1738 1738 else:
1739 1739
1740 1740 def f(*args):
1741 1741 if args not in cache:
1742 1742 if len(cache) > 20:
1743 1743 del cache[order.popleft()]
1744 1744 cache[args] = func(*args)
1745 1745 else:
1746 1746 order.remove(args)
1747 1747 order.append(args)
1748 1748 return cache[args]
1749 1749
1750 1750 return f
1751 1751
1752 1752
1753 1753 class propertycache:
1754 1754 def __init__(self, func):
1755 1755 self.func = func
1756 1756 self.name = func.__name__
1757 1757
1758 1758 def __get__(self, obj, type=None):
1759 1759 result = self.func(obj)
1760 1760 self.cachevalue(obj, result)
1761 1761 return result
1762 1762
1763 1763 def cachevalue(self, obj, value):
1764 1764 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1765 1765 obj.__dict__[self.name] = value
1766 1766
1767 1767
1768 1768 def clearcachedproperty(obj, prop):
1769 1769 '''clear a cached property value, if one has been set'''
1770 1770 prop = pycompat.sysstr(prop)
1771 1771 if prop in obj.__dict__:
1772 1772 del obj.__dict__[prop]
1773 1773
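# A sketch of ``propertycache`` (class and attribute names are made up):
# the first access stores the result in the instance __dict__, so later
# accesses bypass the descriptor entirely.
#
#   class repoinfo:
#       @propertycache
#       def expensive(self):
#           return 42  # runs at most once per instance
#
#   info = repoinfo()
#   assert info.expensive == 42              # computed and cached
#   assert 'expensive' in info.__dict__      # plain attribute from now on
#   clearcachedproperty(info, b'expensive')  # drop it to force recompute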
1774 1774
1775 1775 def increasingchunks(source, min=1024, max=65536):
1776 1776 """return no less than min bytes per chunk while data remains,
1777 1777 doubling min after each chunk until it reaches max"""
1778 1778
1779 1779 def log2(x):
1780 1780 if not x:
1781 1781 return 0
1782 1782 i = 0
1783 1783 while x:
1784 1784 x >>= 1
1785 1785 i += 1
1786 1786 return i - 1
1787 1787
1788 1788 buf = []
1789 1789 blen = 0
1790 1790 for chunk in source:
1791 1791 buf.append(chunk)
1792 1792 blen += len(chunk)
1793 1793 if blen >= min:
1794 1794 if min < max:
1795 1795 min = min << 1
1796 1796 nmin = 1 << log2(blen)
1797 1797 if nmin > min:
1798 1798 min = nmin
1799 1799 if min > max:
1800 1800 min = max
1801 1801 yield b''.join(buf)
1802 1802 blen = 0
1803 1803 buf = []
1804 1804 if buf:
1805 1805 yield b''.join(buf)
1806 1806
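# For example, sixteen 1 KiB input pieces are re-bucketed into chunks that
# roughly double in size, plus a final partial chunk:
#
#   pieces = (b'x' * 1024 for _ in range(16))
#   sizes = [len(c) for c in increasingchunks(pieces)]
#   assert sizes == [1024, 2048, 4096, 8192, 1024]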
1807 1807
1808 1808 def always(fn):
1809 1809 return True
1810 1810
1811 1811
1812 1812 def never(fn):
1813 1813 return False
1814 1814
1815 1815
1816 1816 def nogc(func):
1817 1817 """disable garbage collector
1818 1818
1819 1819 Python's garbage collector triggers a GC each time a certain number of
1820 1820 container objects (the number being defined by gc.get_threshold()) are
1821 1821 allocated even when marked not to be tracked by the collector. Tracking has
1822 1822 no effect on when GCs are triggered, only on what objects the GC looks
1823 1823 into. As a workaround, disable GC while building complex (huge)
1824 1824 containers.
1825 1825
1826 1826 This garbage collector issue has been fixed in 2.7, but it still affects
1827 1827 CPython's performance.
1828 1828 """
1829 1829
1830 1830 def wrapper(*args, **kwargs):
1831 1831 gcenabled = gc.isenabled()
1832 1832 gc.disable()
1833 1833 try:
1834 1834 return func(*args, **kwargs)
1835 1835 finally:
1836 1836 if gcenabled:
1837 1837 gc.enable()
1838 1838
1839 1839 return wrapper
1840 1840
1841 1841
1842 1842 if pycompat.ispypy:
1843 1843 # PyPy runs slower with gc disabled
1844 1844 nogc = lambda x: x
1845 1845
1846 1846
1847 1847 def pathto(root, n1, n2):
1848 1848 # type: (bytes, bytes, bytes) -> bytes
1849 1849 """return the relative path from one place to another.
1850 1850 root should use os.sep to separate directories
1851 1851 n1 should use os.sep to separate directories
1852 1852 n2 should use "/" to separate directories
1853 1853 returns an os.sep-separated path.
1854 1854
1855 1855 If n1 is a relative path, it's assumed it's
1856 1856 relative to root.
1857 1857 n2 should always be relative to root.
1858 1858 """
1859 1859 if not n1:
1860 1860 return localpath(n2)
1861 1861 if os.path.isabs(n1):
1862 1862 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1863 1863 return os.path.join(root, localpath(n2))
1864 1864 n2 = b'/'.join((pconvert(root), n2))
1865 1865 a, b = splitpath(n1), n2.split(b'/')
1866 1866 a.reverse()
1867 1867 b.reverse()
1868 1868 while a and b and a[-1] == b[-1]:
1869 1869 a.pop()
1870 1870 b.pop()
1871 1871 b.reverse()
1872 1872 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1873 1873
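# Illustrative sketch (not part of the module), assuming a POSIX platform
# where os.sep is '/':
#
#     >>> pathto(b'/repo', b'foo/bar', b'foo/baz')
#     b'../baz'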
1874 1874
1875 1875 def checksignature(func, depth=1):
1876 1876 '''wrap a function with code to check for calling errors'''
1877 1877
1878 1878 def check(*args, **kwargs):
1879 1879 try:
1880 1880 return func(*args, **kwargs)
1881 1881 except TypeError:
1882 1882 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1883 1883 raise error.SignatureError
1884 1884 raise
1885 1885
1886 1886 return check
1887 1887
1888 1888
1889 1889 # a whitelist of known filesystems where hardlinks work reliably
1890 1890 _hardlinkfswhitelist = {
1891 1891 b'apfs',
1892 1892 b'btrfs',
1893 1893 b'ext2',
1894 1894 b'ext3',
1895 1895 b'ext4',
1896 1896 b'hfs',
1897 1897 b'jfs',
1898 1898 b'NTFS',
1899 1899 b'reiserfs',
1900 1900 b'tmpfs',
1901 1901 b'ufs',
1902 1902 b'xfs',
1903 1903 b'zfs',
1904 1904 }
1905 1905
1906 1906
1907 1907 def copyfile(
1908 1908 src,
1909 1909 dest,
1910 1910 hardlink=False,
1911 1911 copystat=False,
1912 1912 checkambig=False,
1913 1913 nb_bytes=None,
1914 1914 no_hardlink_cb=None,
1915 1915 check_fs_hardlink=True,
1916 1916 ):
1917 1917 """copy a file, preserving mode and optionally other stat info like
1918 1918 atime/mtime
1919 1919
1920 1920 The checkambig argument is used with filestat, and is useful only if
1921 1921 the destination file is guarded by a lock (e.g. repo.lock or
1922 1922 repo.wlock).
1923 1923
1924 1924 copystat and checkambig should be exclusive.
1925 1925
1926 1926 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1927 1927 """
1928 1928 assert not (copystat and checkambig)
1929 1929 oldstat = None
1930 1930 if os.path.lexists(dest):
1931 1931 if checkambig:
1932 1932 oldstat = filestat.frompath(dest)
1933 1933 unlink(dest)
1934 1934 if hardlink and check_fs_hardlink:
1935 1935 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1936 1936 # unless we are confident that dest is on a whitelisted filesystem.
1937 1937 try:
1938 1938 fstype = getfstype(os.path.dirname(dest))
1939 1939 except OSError:
1940 1940 fstype = None
1941 1941 if fstype not in _hardlinkfswhitelist:
1942 1942 if no_hardlink_cb is not None:
1943 1943 no_hardlink_cb()
1944 1944 hardlink = False
1945 1945 if hardlink:
1946 1946 if nb_bytes is not None:
1947 1947 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1948 1948 raise error.ProgrammingError(m)
1949 1949 try:
1950 1950 oslink(src, dest)
1951 1951 return
1952 1952 except (IOError, OSError) as exc:
1953 1953 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1954 1954 no_hardlink_cb()
1955 1955 # fall back to normal copy
1956 1956 if os.path.islink(src):
1957 1957 os.symlink(os.readlink(src), dest)
1958 1958 # copytime is ignored for symlinks, but in general copytime isn't needed
1959 1959 # for them anyway
1960 1960 if nb_bytes is not None:
1961 1961 m = "cannot use `nb_bytes` on a symlink"
1962 1962 raise error.ProgrammingError(m)
1963 1963 else:
1964 1964 try:
1965 1965 shutil.copyfile(src, dest)
1966 1966 if copystat:
1967 1967 # copystat also copies mode
1968 1968 shutil.copystat(src, dest)
1969 1969 else:
1970 1970 shutil.copymode(src, dest)
1971 1971 if oldstat and oldstat.stat:
1972 1972 newstat = filestat.frompath(dest)
1973 1973 if newstat.isambig(oldstat):
1974 1974 # stat of copied file is ambiguous to original one
1975 1975 advanced = (
1976 1976 oldstat.stat[stat.ST_MTIME] + 1
1977 1977 ) & 0x7FFFFFFF
1978 1978 os.utime(dest, (advanced, advanced))
1979 1979 # We could do something smarter using `copy_file_range` call or similar
1980 1980 if nb_bytes is not None:
1981 1981 with open(dest, mode='r+') as f:
1982 1982 f.truncate(nb_bytes)
1983 1983 except shutil.Error as inst:
1984 1984 raise error.Abort(stringutil.forcebytestr(inst))
1985 1985
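# Illustrative sketch (not part of the module; the paths and the callback
# are made up): request a hardlink but get notified, and fall back to a
# plain copy, when the destination filesystem is not whitelisted:
#
#     def _nolink():
#         print('falling back to copy')
#     copyfile(b'/tmp/src', b'/tmp/dst', hardlink=True, no_hardlink_cb=_nolink)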
1986 1986
1987 1987 def copyfiles(src, dst, hardlink=None, progress=None):
1988 1988 """Copy a directory tree using hardlinks if possible."""
1989 1989 num = 0
1990 1990
1991 1991 def settopic():
1992 1992 if progress:
1993 1993 progress.topic = _(b'linking') if hardlink else _(b'copying')
1994 1994
1995 1995 if os.path.isdir(src):
1996 1996 if hardlink is None:
1997 1997 hardlink = (
1998 1998 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1999 1999 )
2000 2000 settopic()
2001 2001 os.mkdir(dst)
2002 2002 for name, kind in listdir(src):
2003 2003 srcname = os.path.join(src, name)
2004 2004 dstname = os.path.join(dst, name)
2005 2005 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2006 2006 num += n
2007 2007 else:
2008 2008 if hardlink is None:
2009 2009 hardlink = (
2010 2010 os.stat(os.path.dirname(src)).st_dev
2011 2011 == os.stat(os.path.dirname(dst)).st_dev
2012 2012 )
2013 2013 settopic()
2014 2014
2015 2015 if hardlink:
2016 2016 try:
2017 2017 oslink(src, dst)
2018 2018 except (IOError, OSError) as exc:
2019 2019 if exc.errno != errno.EEXIST:
2020 2020 hardlink = False
2021 2021 # XXX maybe try to relink if the file exists?
2022 2022 shutil.copy(src, dst)
2023 2023 else:
2024 2024 shutil.copy(src, dst)
2025 2025 num += 1
2026 2026 if progress:
2027 2027 progress.increment()
2028 2028
2029 2029 return hardlink, num
2030 2030
2031 2031
2032 2032 _winreservednames = {
2033 2033 b'con',
2034 2034 b'prn',
2035 2035 b'aux',
2036 2036 b'nul',
2037 2037 b'com1',
2038 2038 b'com2',
2039 2039 b'com3',
2040 2040 b'com4',
2041 2041 b'com5',
2042 2042 b'com6',
2043 2043 b'com7',
2044 2044 b'com8',
2045 2045 b'com9',
2046 2046 b'lpt1',
2047 2047 b'lpt2',
2048 2048 b'lpt3',
2049 2049 b'lpt4',
2050 2050 b'lpt5',
2051 2051 b'lpt6',
2052 2052 b'lpt7',
2053 2053 b'lpt8',
2054 2054 b'lpt9',
2055 2055 }
2056 2056 _winreservedchars = b':*?"<>|'
2057 2057
2058 2058
2059 2059 def checkwinfilename(path):
2060 2060 # type: (bytes) -> Optional[bytes]
2061 2061 r"""Check that the base-relative path is a valid filename on Windows.
2062 2062 Returns None if the path is ok, or a UI string describing the problem.
2063 2063
2064 2064 >>> checkwinfilename(b"just/a/normal/path")
2065 2065 >>> checkwinfilename(b"foo/bar/con.xml")
2066 2066 "filename contains 'con', which is reserved on Windows"
2067 2067 >>> checkwinfilename(b"foo/con.xml/bar")
2068 2068 "filename contains 'con', which is reserved on Windows"
2069 2069 >>> checkwinfilename(b"foo/bar/xml.con")
2070 2070 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2071 2071 "filename contains 'AUX', which is reserved on Windows"
2072 2072 >>> checkwinfilename(b"foo/bar/bla:.txt")
2073 2073 "filename contains ':', which is reserved on Windows"
2074 2074 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2075 2075 "filename contains '\\x07', which is invalid on Windows"
2076 2076 >>> checkwinfilename(b"foo/bar/bla ")
2077 2077 "filename ends with ' ', which is not allowed on Windows"
2078 2078 >>> checkwinfilename(b"../bar")
2079 2079 >>> checkwinfilename(b"foo\\")
2080 2080 "filename ends with '\\', which is invalid on Windows"
2081 2081 >>> checkwinfilename(b"foo\\/bar")
2082 2082 "directory name ends with '\\', which is invalid on Windows"
2083 2083 """
2084 2084 if path.endswith(b'\\'):
2085 2085 return _(b"filename ends with '\\', which is invalid on Windows")
2086 2086 if b'\\/' in path:
2087 2087 return _(b"directory name ends with '\\', which is invalid on Windows")
2088 2088 for n in path.replace(b'\\', b'/').split(b'/'):
2089 2089 if not n:
2090 2090 continue
2091 2091 for c in _filenamebytestr(n):
2092 2092 if c in _winreservedchars:
2093 2093 return (
2094 2094 _(
2095 2095 b"filename contains '%s', which is reserved "
2096 2096 b"on Windows"
2097 2097 )
2098 2098 % c
2099 2099 )
2100 2100 if ord(c) <= 31:
2101 2101 return _(
2102 2102 b"filename contains '%s', which is invalid on Windows"
2103 2103 ) % stringutil.escapestr(c)
2104 2104 base = n.split(b'.')[0]
2105 2105 if base and base.lower() in _winreservednames:
2106 2106 return (
2107 2107 _(b"filename contains '%s', which is reserved on Windows")
2108 2108 % base
2109 2109 )
2110 2110 t = n[-1:]
2111 2111 if t in b'. ' and n not in b'..':
2112 2112 return (
2113 2113 _(
2114 2114 b"filename ends with '%s', which is not allowed "
2115 2115 b"on Windows"
2116 2116 )
2117 2117 % t
2118 2118 )
2119 2119
2120 2120
2121 2121 timer = getattr(time, "perf_counter", None)
2122 2122
2123 2123 if pycompat.iswindows:
2124 2124 checkosfilename = checkwinfilename
2125 2125 if not timer:
2126 2126 timer = time.clock
2127 2127 else:
2128 2128 # mercurial.windows doesn't have platform.checkosfilename
2129 2129 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2130 2130 if not timer:
2131 2131 timer = time.time
2132 2132
2133 2133
2134 2134 def makelock(info, pathname):
2135 2135 """Create a lock file atomically if possible
2136 2136
2137 2137 This may leave a stale lock file if symlink isn't supported and signal
2138 2138 interrupt is enabled.
2139 2139 """
2140 2140 try:
2141 2141 return os.symlink(info, pathname)
2142 2142 except OSError as why:
2143 2143 if why.errno == errno.EEXIST:
2144 2144 raise
2145 2145 except AttributeError: # no symlink in os
2146 2146 pass
2147 2147
2148 2148 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2149 2149 ld = os.open(pathname, flags)
2150 2150 os.write(ld, info)
2151 2151 os.close(ld)
2152 2152
2153 2153
2154 2154 def readlock(pathname):
2155 2155 # type: (bytes) -> bytes
2156 2156 try:
2157 2157 return readlink(pathname)
2158 2158 except OSError as why:
2159 2159 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2160 2160 raise
2161 2161 except AttributeError: # no symlink in os
2162 2162 pass
2163 2163 with posixfile(pathname, b'rb') as fp:
2164 2164 return fp.read()
2165 2165
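# Illustrative sketch (not part of the module; the path is made up): on
# platforms with symlinks the lock content lives in the link target
# itself, so readlock() recovers it without opening a file:
#
#     makelock(b'pid:12345', b'/tmp/demo.lock')
#     assert readlock(b'/tmp/demo.lock') == b'pid:12345'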
2166 2166
2167 2167 def fstat(fp):
2168 2168 '''stat file object that may not have fileno method.'''
2169 2169 try:
2170 2170 return os.fstat(fp.fileno())
2171 2171 except AttributeError:
2172 2172 return os.stat(fp.name)
2173 2173
2174 2174
2175 2175 # File system features
2176 2176
2177 2177
2178 2178 def fscasesensitive(path):
2179 2179 # type: (bytes) -> bool
2180 2180 """
2181 2181 Return true if the given path is on a case-sensitive filesystem
2182 2182
2183 2183 Requires a path (like /foo/.hg) ending with a foldable final
2184 2184 directory component.
2185 2185 """
2186 2186 s1 = os.lstat(path)
2187 2187 d, b = os.path.split(path)
2188 2188 b2 = b.upper()
2189 2189 if b == b2:
2190 2190 b2 = b.lower()
2191 2191 if b == b2:
2192 2192 return True # no evidence against case sensitivity
2193 2193 p2 = os.path.join(d, b2)
2194 2194 try:
2195 2195 s2 = os.lstat(p2)
2196 2196 if s2 == s1:
2197 2197 return False
2198 2198 return True
2199 2199 except OSError:
2200 2200 return True
2201 2201
2202 2202
2203 2203 _re2_input = lambda x: x
2204 2204 try:
2205 2205 import re2 # pytype: disable=import-error
2206 2206
2207 2207 _re2 = None
2208 2208 except ImportError:
2209 2209 _re2 = False
2210 2210
2211 2211
2212 2212 class _re:
2213 2213 def _checkre2(self):
2214 2214 global _re2
2215 2215 global _re2_input
2216 2216
2217 2217 check_pattern = br'\[([^\[]+)\]'
2218 2218 check_input = b'[ui]'
2219 2219 try:
2220 2220 # check if match works, see issue3964
2221 2221 _re2 = bool(re2.match(check_pattern, check_input))
2222 2222 except ImportError:
2223 2223 _re2 = False
2224 2224 except TypeError:
2225 2225 # the `pyre-2` project provides a re2 module that accepts bytes
2226 2226 # the `fb-re2` project provides a re2 module that accepts sysstr
2227 2227 check_pattern = pycompat.sysstr(check_pattern)
2228 2228 check_input = pycompat.sysstr(check_input)
2229 2229 _re2 = bool(re2.match(check_pattern, check_input))
2230 2230 _re2_input = pycompat.sysstr
2231 2231
2232 2232 def compile(self, pat, flags=0):
2233 2233 """Compile a regular expression, using re2 if possible
2234 2234
2235 2235 For best performance, use only re2-compatible regexp features. The
2236 2236 only flags from the re module that are re2-compatible are
2237 2237 IGNORECASE and MULTILINE."""
2238 2238 if _re2 is None:
2239 2239 self._checkre2()
2240 2240 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2241 2241 if flags & remod.IGNORECASE:
2242 2242 pat = b'(?i)' + pat
2243 2243 if flags & remod.MULTILINE:
2244 2244 pat = b'(?m)' + pat
2245 2245 try:
2246 2246 return re2.compile(_re2_input(pat))
2247 2247 except re2.error:
2248 2248 pass
2249 2249 return remod.compile(pat, flags)
2250 2250
2251 2251 @propertycache
2252 2252 def escape(self):
2253 2253 """Return the version of escape corresponding to self.compile.
2254 2254
2255 2255 This is imperfect because whether re2 or re is used for a particular
2256 2256 function depends on the flags, etc, but it's the best we can do.
2257 2257 """
2258 2258 global _re2
2259 2259 if _re2 is None:
2260 2260 self._checkre2()
2261 2261 if _re2:
2262 2262 return re2.escape
2263 2263 else:
2264 2264 return remod.escape
2265 2265
2266 2266
2267 2267 re = _re()
2268 2268
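# Illustrative sketch (not part of the module): util's `re.compile`
# transparently prefers re2 when it is importable and the flags are
# compatible, and otherwise falls back to a stdlib pattern object:
#
#     >>> pat = re.compile(br'ba[rz]', remod.IGNORECASE)
#     >>> bool(pat.match(b'BAZ'))
#     True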
2269 2269 _fspathcache = {}
2270 2270
2271 2271
2272 2272 def fspath(name, root):
2273 2273 # type: (bytes, bytes) -> bytes
2274 2274 """Get name in the case stored in the filesystem
2275 2275
2276 2276 The name should be relative to root, and be normcase-ed for efficiency.
2277 2277
2278 2278 Note that this function is unnecessary, and should not be
2279 2279 called, for case-sensitive filesystems (simply because it's expensive).
2280 2280
2281 2281 The root should be normcase-ed, too.
2282 2282 """
2283 2283
2284 2284 def _makefspathcacheentry(dir):
2285 2285 return {normcase(n): n for n in os.listdir(dir)}
2286 2286
2287 2287 seps = pycompat.ossep
2288 2288 if pycompat.osaltsep:
2289 2289 seps = seps + pycompat.osaltsep
2290 2290 # Protect backslashes. This gets silly very quickly.
2291 2291 seps = seps.replace(b'\\', b'\\\\')
2292 2292 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2293 2293 dir = os.path.normpath(root)
2294 2294 result = []
2295 2295 for part, sep in pattern.findall(name):
2296 2296 if sep:
2297 2297 result.append(sep)
2298 2298 continue
2299 2299
2300 2300 if dir not in _fspathcache:
2301 2301 _fspathcache[dir] = _makefspathcacheentry(dir)
2302 2302 contents = _fspathcache[dir]
2303 2303
2304 2304 found = contents.get(part)
2305 2305 if not found:
2306 2306 # retry "once per directory" per "dirstate.walk" which
2307 2307 # may take place for each patch of "hg qpush", for example
2308 2308 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2309 2309 found = contents.get(part)
2310 2310
2311 2311 result.append(found or part)
2312 2312 dir = os.path.join(dir, part)
2313 2313
2314 2314 return b''.join(result)
2315 2315
2316 2316
2317 2317 def checknlink(testfile):
2318 2318 # type: (bytes) -> bool
2319 2319 '''check whether hardlink count reporting works properly'''
2320 2320
2321 2321 # testfile may be open, so we need a separate file for checking to
2322 2322 # work around issue2543 (or testfile may get lost on Samba shares)
2323 2323 f1, f2, fp = None, None, None
2324 2324 try:
2325 2325 fd, f1 = pycompat.mkstemp(
2326 2326 prefix=b'.%s-' % os.path.basename(testfile),
2327 2327 suffix=b'1~',
2328 2328 dir=os.path.dirname(testfile),
2329 2329 )
2330 2330 os.close(fd)
2331 2331 f2 = b'%s2~' % f1[:-2]
2332 2332
2333 2333 oslink(f1, f2)
2334 2334 # nlinks() may behave differently for files on Windows shares if
2335 2335 # the file is open.
2336 2336 fp = posixfile(f2)
2337 2337 return nlinks(f2) > 1
2338 2338 except OSError:
2339 2339 return False
2340 2340 finally:
2341 2341 if fp is not None:
2342 2342 fp.close()
2343 2343 for f in (f1, f2):
2344 2344 try:
2345 2345 if f is not None:
2346 2346 os.unlink(f)
2347 2347 except OSError:
2348 2348 pass
2349 2349
2350 2350
2351 2351 def endswithsep(path):
2352 2352 # type: (bytes) -> bool
2353 2353 '''Check path ends with os.sep or os.altsep.'''
2354 2354 return bool( # help pytype
2355 2355 path.endswith(pycompat.ossep)
2356 2356 or pycompat.osaltsep
2357 2357 and path.endswith(pycompat.osaltsep)
2358 2358 )
2359 2359
2360 2360
2361 2361 def splitpath(path):
2362 2362 # type: (bytes) -> List[bytes]
2363 2363 """Split path by os.sep.
2364 2364 Note that this function does not use os.altsep because it is
2365 2365 meant as a simple alternative to "xxx.split(os.sep)".
2366 2366 It is recommended to use os.path.normpath() before using this
2367 2367 function if needed.
2368 2368 return path.split(pycompat.ossep)
2369 2369
2370 2370
2371 2371 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2372 2372 """Create a temporary file with the same contents from name
2373 2373
2374 2374 The permission bits are copied from the original file.
2375 2375
2376 2376 If the temporary file is going to be truncated immediately, you
2377 2377 can use emptyok=True as an optimization.
2378 2378
2379 2379 Returns the name of the temporary file.
2380 2380 """
2381 2381 d, fn = os.path.split(name)
2382 2382 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2383 2383 os.close(fd)
2384 2384 # Temporary files are created with mode 0600, which is usually not
2385 2385 # what we want. If the original file already exists, just copy
2386 2386 # its mode. Otherwise, manually obey umask.
2387 2387 copymode(name, temp, createmode, enforcewritable)
2388 2388
2389 2389 if emptyok:
2390 2390 return temp
2391 2391 try:
2392 2392 try:
2393 2393 ifp = posixfile(name, b"rb")
2394 2394 except IOError as inst:
2395 2395 if inst.errno == errno.ENOENT:
2396 2396 return temp
2397 2397 if not getattr(inst, 'filename', None):
2398 2398 inst.filename = name
2399 2399 raise
2400 2400 ofp = posixfile(temp, b"wb")
2401 2401 for chunk in filechunkiter(ifp):
2402 2402 ofp.write(chunk)
2403 2403 ifp.close()
2404 2404 ofp.close()
2405 2405 except: # re-raises
2406 2406 try:
2407 2407 os.unlink(temp)
2408 2408 except OSError:
2409 2409 pass
2410 2410 raise
2411 2411 return temp
2412 2412
2413 2413
2414 2414 class filestat:
2415 2415 """help to exactly detect change of a file
2416 2416
2417 2417 'stat' attribute is result of 'os.stat()' if specified 'path'
2418 2418 exists. Otherwise, it is None. This can avoid preparative
2419 2419 'exists()' examination on client side of this class.
2420 2420 """
2421 2421
2422 2422 def __init__(self, stat):
2423 2423 self.stat = stat
2424 2424
2425 2425 @classmethod
2426 2426 def frompath(cls, path):
2427 2427 try:
2428 2428 stat = os.stat(path)
2429 2429 except FileNotFoundError:
2430 2430 stat = None
2431 2431 return cls(stat)
2432 2432
2433 2433 @classmethod
2434 2434 def fromfp(cls, fp):
2435 2435 stat = os.fstat(fp.fileno())
2436 2436 return cls(stat)
2437 2437
2438 2438 __hash__ = object.__hash__
2439 2439
2440 2440 def __eq__(self, old):
2441 2441 try:
2442 2442 # if ambiguity between stat of new and old file is
2443 2443 # avoided, comparison of size, ctime and mtime is enough
2444 2444 # to exactly detect change of a file regardless of platform
2445 2445 return (
2446 2446 self.stat.st_size == old.stat.st_size
2447 2447 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2448 2448 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2449 2449 )
2450 2450 except AttributeError:
2451 2451 pass
2452 2452 try:
2453 2453 return self.stat is None and old.stat is None
2454 2454 except AttributeError:
2455 2455 return False
2456 2456
2457 2457 def isambig(self, old):
2458 2458 """Examine whether new (= self) stat is ambiguous against old one
2459 2459
2460 2460 "S[N]" below means stat of a file at N-th change:
2461 2461
2462 2462 - S[n-1].ctime < S[n].ctime: can detect change of a file
2463 2463 - S[n-1].ctime == S[n].ctime
2464 2464 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2465 2465 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2466 2466 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2467 2467 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2468 2468
2469 2469 Case (*2) above means that a file was changed twice or more within
2470 2470 the same second (= S[n-1].ctime), making timestamp comparison
2471 2471 ambiguous.
2472 2472
2473 2473 The basic idea for avoiding such ambiguity is to "advance mtime 1
2474 2474 sec, if timestamp is ambiguous".
2475 2475
2476 2476 But advancing mtime only in case (*2) doesn't work as
2477 2477 expected, because naturally advanced S[n].mtime in case (*1)
2478 2478 might be equal to manually advanced S[n-1 or earlier].mtime.
2479 2479
2480 2480 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2481 2481 treated as ambiguous regardless of mtime, to avoid overlooking
2482 2482 changes hidden by collisions between such mtimes.
2483 2483
2484 2484 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2485 2485 S[n].mtime", even if size of a file isn't changed.
2486 2486 """
2487 2487 try:
2488 2488 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2489 2489 except AttributeError:
2490 2490 return False
2491 2491
2492 2492 def avoidambig(self, path, old):
2493 2493 """Change file stat of specified path to avoid ambiguity
2494 2494
2495 2495 'old' should be previous filestat of 'path'.
2496 2496
2497 2497 Avoiding ambiguity is skipped if the process doesn't have
2498 2498 appropriate privileges for 'path'; this returns False in that
2499 2499 case.
2500 2500
2501 2501 Otherwise, this returns True, as "ambiguity is avoided".
2502 2502 """
2503 2503 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2504 2504 try:
2505 2505 os.utime(path, (advanced, advanced))
2506 except OSError as inst:
2507 if inst.errno == errno.EPERM:
2506 except PermissionError:
2508 2507 # utime() on the file created by another user causes EPERM,
2509 2508 # if a process doesn't have appropriate privileges
2510 2509 return False
2511 raise
2512 2510 return True
2513 2511
2514 2512 def __ne__(self, other):
2515 2513 return not self == other
2516 2514
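# Illustrative sketch (not part of the module), assuming a POSIX system
# where the path below does not exist: a missing file yields stat=None,
# and two filestats of missing files compare equal:
#
#     >>> fs = filestat.frompath(b'/nonexistent/path')
#     >>> fs.stat is None
#     True
#     >>> fs == filestat.frompath(b'/nonexistent/path')
#     True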
2517 2515
2518 2516 class atomictempfile:
2519 2517 """writable file object that atomically updates a file
2520 2518
2521 2519 All writes will go to a temporary copy of the original file. Call
2522 2520 close() when you are done writing, and atomictempfile will rename
2523 2521 the temporary copy to the original name, making the changes
2524 2522 visible. If the object is destroyed without being closed, all your
2525 2523 writes are discarded.
2526 2524
2527 2525 The checkambig argument of the constructor is used with filestat,
2528 2526 and is useful only if the target file is guarded by a lock
2529 2527 (e.g. repo.lock or repo.wlock).
2530 2528 """
2531 2529
2532 2530 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2533 2531 self.__name = name # permanent name
2534 2532 self._tempname = mktempcopy(
2535 2533 name,
2536 2534 emptyok=(b'w' in mode),
2537 2535 createmode=createmode,
2538 2536 enforcewritable=(b'w' in mode),
2539 2537 )
2540 2538
2541 2539 self._fp = posixfile(self._tempname, mode)
2542 2540 self._checkambig = checkambig
2543 2541
2544 2542 # delegated methods
2545 2543 self.read = self._fp.read
2546 2544 self.write = self._fp.write
2547 2545 self.seek = self._fp.seek
2548 2546 self.tell = self._fp.tell
2549 2547 self.fileno = self._fp.fileno
2550 2548
2551 2549 def close(self):
2552 2550 if not self._fp.closed:
2553 2551 self._fp.close()
2554 2552 filename = localpath(self.__name)
2555 2553 oldstat = self._checkambig and filestat.frompath(filename)
2556 2554 if oldstat and oldstat.stat:
2557 2555 rename(self._tempname, filename)
2558 2556 newstat = filestat.frompath(filename)
2559 2557 if newstat.isambig(oldstat):
2560 2558 # stat of changed file is ambiguous to original one
2561 2559 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2562 2560 os.utime(filename, (advanced, advanced))
2563 2561 else:
2564 2562 rename(self._tempname, filename)
2565 2563
2566 2564 def discard(self):
2567 2565 if not self._fp.closed:
2568 2566 try:
2569 2567 os.unlink(self._tempname)
2570 2568 except OSError:
2571 2569 pass
2572 2570 self._fp.close()
2573 2571
2574 2572 def __del__(self):
2575 2573 if safehasattr(self, '_fp'): # constructor actually did something
2576 2574 self.discard()
2577 2575
2578 2576 def __enter__(self):
2579 2577 return self
2580 2578
2581 2579 def __exit__(self, exctype, excvalue, traceback):
2582 2580 if exctype is not None:
2583 2581 self.discard()
2584 2582 else:
2585 2583 self.close()
2586 2584
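# Illustrative sketch (not part of the module; the path is made up):
# writes go to a temporary copy and only become visible at close(), so
# leaving the `with` block on an exception discards them:
#
#     with atomictempfile(b'/tmp/demo.txt') as f:
#         f.write(b'all or nothing\n')
#     # /tmp/demo.txt now has the full content, or was not touched at all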
2587 2585
2588 2586 def tryrmdir(f):
2589 2587 try:
2590 2588 removedirs(f)
2591 2589 except OSError as e:
2592 2590 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2593 2591 raise
2594 2592
2595 2593
2596 2594 def unlinkpath(f, ignoremissing=False, rmdir=True):
2597 2595 # type: (bytes, bool, bool) -> None
2598 2596 """unlink and remove the directory if it is empty"""
2599 2597 if ignoremissing:
2600 2598 tryunlink(f)
2601 2599 else:
2602 2600 unlink(f)
2603 2601 if rmdir:
2604 2602 # try removing directories that might now be empty
2605 2603 try:
2606 2604 removedirs(os.path.dirname(f))
2607 2605 except OSError:
2608 2606 pass
2609 2607
2610 2608
2611 2609 def tryunlink(f):
2612 2610 # type: (bytes) -> None
2613 2611 """Attempt to remove a file, ignoring FileNotFoundError."""
2614 2612 try:
2615 2613 unlink(f)
2616 2614 except FileNotFoundError:
2617 2615 pass
2618 2616
2619 2617
2620 2618 def makedirs(name, mode=None, notindexed=False):
2621 2619 # type: (bytes, Optional[int], bool) -> None
2622 2620 """recursive directory creation with parent mode inheritance
2623 2621
2624 2622 Newly created directories are marked as "not to be indexed by
2625 2623 the content indexing service", if ``notindexed`` is specified
2626 2624 for "write" mode access.
2627 2625 """
2628 2626 try:
2629 2627 makedir(name, notindexed)
2630 2628 except OSError as err:
2631 2629 if err.errno == errno.EEXIST:
2632 2630 return
2633 2631 if err.errno != errno.ENOENT or not name:
2634 2632 raise
2635 2633 parent = os.path.dirname(abspath(name))
2636 2634 if parent == name:
2637 2635 raise
2638 2636 makedirs(parent, mode, notindexed)
2639 2637 try:
2640 2638 makedir(name, notindexed)
2641 2639 except OSError as err:
2642 2640 # Catch EEXIST to handle races
2643 2641 if err.errno == errno.EEXIST:
2644 2642 return
2645 2643 raise
2646 2644 if mode is not None:
2647 2645 os.chmod(name, mode)
2648 2646
2649 2647
2650 2648 def readfile(path):
2651 2649 # type: (bytes) -> bytes
2652 2650 with open(path, b'rb') as fp:
2653 2651 return fp.read()
2654 2652
2655 2653
2656 2654 def writefile(path, text):
2657 2655 # type: (bytes, bytes) -> None
2658 2656 with open(path, b'wb') as fp:
2659 2657 fp.write(text)
2660 2658
2661 2659
2662 2660 def appendfile(path, text):
2663 2661 # type: (bytes, bytes) -> None
2664 2662 with open(path, b'ab') as fp:
2665 2663 fp.write(text)
2666 2664
2667 2665
2668 2666 class chunkbuffer:
2669 2667 """Allow arbitrary sized chunks of data to be efficiently read from an
2670 2668 iterator over chunks of arbitrary size."""
2671 2669
2672 2670 def __init__(self, in_iter):
2673 2671 """in_iter is the iterator that's iterating over the input chunks."""
2674 2672
2675 2673 def splitbig(chunks):
2676 2674 for chunk in chunks:
2677 2675 if len(chunk) > 2 ** 20:
2678 2676 pos = 0
2679 2677 while pos < len(chunk):
2680 2678 end = pos + 2 ** 18
2681 2679 yield chunk[pos:end]
2682 2680 pos = end
2683 2681 else:
2684 2682 yield chunk
2685 2683
2686 2684 self.iter = splitbig(in_iter)
2687 2685 self._queue = collections.deque()
2688 2686 self._chunkoffset = 0
2689 2687
2690 2688 def read(self, l=None):
2691 2689 """Read L bytes of data from the iterator of chunks of data.
2692 2690 Returns less than L bytes if the iterator runs dry.
2693 2691
2694 2692 If the size parameter is omitted, read everything."""
2695 2693 if l is None:
2696 2694 return b''.join(self.iter)
2697 2695
2698 2696 left = l
2699 2697 buf = []
2700 2698 queue = self._queue
2701 2699 while left > 0:
2702 2700 # refill the queue
2703 2701 if not queue:
2704 2702 target = 2 ** 18
2705 2703 for chunk in self.iter:
2706 2704 queue.append(chunk)
2707 2705 target -= len(chunk)
2708 2706 if target <= 0:
2709 2707 break
2710 2708 if not queue:
2711 2709 break
2712 2710
2713 2711 # The easy way to do this would be to queue.popleft(), modify the
2714 2712 # chunk (if necessary), then queue.appendleft(). However, for cases
2715 2713 # where we read partial chunk content, this incurs 2 dequeue
2716 2714 # mutations and creates a new str for the remaining chunk in the
2717 2715 # queue. Our code below avoids this overhead.
2718 2716
2719 2717 chunk = queue[0]
2720 2718 chunkl = len(chunk)
2721 2719 offset = self._chunkoffset
2722 2720
2723 2721 # Use full chunk.
2724 2722 if offset == 0 and left >= chunkl:
2725 2723 left -= chunkl
2726 2724 queue.popleft()
2727 2725 buf.append(chunk)
2728 2726 # self._chunkoffset remains at 0.
2729 2727 continue
2730 2728
2731 2729 chunkremaining = chunkl - offset
2732 2730
2733 2731 # Use all of unconsumed part of chunk.
2734 2732 if left >= chunkremaining:
2735 2733 left -= chunkremaining
2736 2734 queue.popleft()
2737 2735 # offset == 0 is enabled by block above, so this won't merely
2738 2736 # copy via ``chunk[0:]``.
2739 2737 buf.append(chunk[offset:])
2740 2738 self._chunkoffset = 0
2741 2739
2742 2740 # Partial chunk needed.
2743 2741 else:
2744 2742 buf.append(chunk[offset : offset + left])
2745 2743 self._chunkoffset += left
2746 2744 left -= chunkremaining
2747 2745
2748 2746 return b''.join(buf)
2749 2747
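# Illustrative sketch (not part of the module): fixed-size reads are
# satisfied across the boundaries of the underlying chunks:
#
#     >>> cb = chunkbuffer(iter([b'abc', b'def']))
#     >>> cb.read(4)
#     b'abcd'
#     >>> cb.read(2)
#     b'ef'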
2750 2748
2751 2749 def filechunkiter(f, size=131072, limit=None):
2752 2750 """Create a generator that produces the data in the file size
2753 2751 (default 131072) bytes at a time, up to optional limit (default is
2754 2752 to read all data). Chunks may be less than size bytes if the
2755 2753 chunk is the last chunk in the file, or the file is a socket or
2756 2754 some other type of file that sometimes reads less data than is
2757 2755 requested."""
2758 2756 assert size >= 0
2759 2757 assert limit is None or limit >= 0
2760 2758 while True:
2761 2759 if limit is None:
2762 2760 nbytes = size
2763 2761 else:
2764 2762 nbytes = min(limit, size)
2765 2763 s = nbytes and f.read(nbytes)
2766 2764 if not s:
2767 2765 break
2768 2766 if limit:
2769 2767 limit -= len(s)
2770 2768 yield s
2771 2769
2772 2770
2773 2771 class cappedreader:
2774 2772 """A file object proxy that allows reading up to N bytes.
2775 2773
2776 2774 Given a source file object, instances of this type allow reading up to
2777 2775 N bytes from that source file object. Attempts to read past the allowed
2778 2776 limit are treated as EOF.
2779 2777
2780 2778 It is assumed that I/O is not performed on the original file object
2781 2779 in addition to I/O that is performed by this instance. If there is,
2782 2780 state tracking will get out of sync and unexpected results will ensue.
2783 2781 """
2784 2782
2785 2783 def __init__(self, fh, limit):
2786 2784 """Allow reading up to <limit> bytes from <fh>."""
2787 2785 self._fh = fh
2788 2786 self._left = limit
2789 2787
2790 2788 def read(self, n=-1):
2791 2789 if not self._left:
2792 2790 return b''
2793 2791
2794 2792 if n < 0:
2795 2793 n = self._left
2796 2794
2797 2795 data = self._fh.read(min(n, self._left))
2798 2796 self._left -= len(data)
2799 2797 assert self._left >= 0
2800 2798
2801 2799 return data
2802 2800
2803 2801 def readinto(self, b):
2804 2802 res = self.read(len(b))
2805 2803 if res is None:
2806 2804 return None
2807 2805
2808 2806 b[0 : len(res)] = res
2809 2807 return len(res)
2810 2808
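# Illustrative sketch (not part of the module): reads beyond the cap are
# treated as EOF even though the source still has data:
#
#     >>> cr = cappedreader(io.BytesIO(b'abcdef'), 4)
#     >>> cr.read()
#     b'abcd'
#     >>> cr.read()
#     b''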
2811 2809
2812 2810 def unitcountfn(*unittable):
2813 2811 '''return a function that renders a readable count of some quantity'''
2814 2812
2815 2813 def go(count):
2816 2814 for multiplier, divisor, format in unittable:
2817 2815 if abs(count) >= divisor * multiplier:
2818 2816 return format % (count / float(divisor))
2819 2817 return unittable[-1][2] % count
2820 2818
2821 2819 return go
2822 2820
2823 2821
2824 2822 def processlinerange(fromline, toline):
2825 2823 # type: (int, int) -> Tuple[int, int]
2826 2824 """Check that linerange <fromline>:<toline> makes sense and return a
2827 2825 0-based range.
2828 2826
2829 2827 >>> processlinerange(10, 20)
2830 2828 (9, 20)
2831 2829 >>> processlinerange(2, 1)
2832 2830 Traceback (most recent call last):
2833 2831 ...
2834 2832 ParseError: line range must be positive
2835 2833 >>> processlinerange(0, 5)
2836 2834 Traceback (most recent call last):
2837 2835 ...
2838 2836 ParseError: fromline must be strictly positive
2839 2837 """
2840 2838 if toline - fromline < 0:
2841 2839 raise error.ParseError(_(b"line range must be positive"))
2842 2840 if fromline < 1:
2843 2841 raise error.ParseError(_(b"fromline must be strictly positive"))
2844 2842 return fromline - 1, toline
2845 2843
2846 2844
2847 2845 bytecount = unitcountfn(
2848 2846 (100, 1 << 30, _(b'%.0f GB')),
2849 2847 (10, 1 << 30, _(b'%.1f GB')),
2850 2848 (1, 1 << 30, _(b'%.2f GB')),
2851 2849 (100, 1 << 20, _(b'%.0f MB')),
2852 2850 (10, 1 << 20, _(b'%.1f MB')),
2853 2851 (1, 1 << 20, _(b'%.2f MB')),
2854 2852 (100, 1 << 10, _(b'%.0f KB')),
2855 2853 (10, 1 << 10, _(b'%.1f KB')),
2856 2854 (1, 1 << 10, _(b'%.2f KB')),
2857 2855 (1, 1, _(b'%.0f bytes')),
2858 2856 )
2859 2857
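# Illustrative sketch (not part of the module), assuming an untranslated
# locale: the first row whose threshold (multiplier * divisor) the value
# reaches selects the format:
#
#     >>> bytecount(4096)
#     b'4.00 KB'
#     >>> bytecount(123456789)
#     b'118 MB'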
2860 2858
2861 2859 class transformingwriter:
2862 2860 """Writable file wrapper to transform data by function"""
2863 2861
2864 2862 def __init__(self, fp, encode):
2865 2863 self._fp = fp
2866 2864 self._encode = encode
2867 2865
2868 2866 def close(self):
2869 2867 self._fp.close()
2870 2868
2871 2869 def flush(self):
2872 2870 self._fp.flush()
2873 2871
2874 2872 def write(self, data):
2875 2873 return self._fp.write(self._encode(data))
2876 2874
2877 2875
2878 2876 # Matches a single EOL, which can be either a CRLF (where repeated CRs
2879 2877 # are removed) or a LF. We do not care about old Macintosh files, so a
2880 2878 # stray CR is an error.
2881 2879 _eolre = remod.compile(br'\r*\n')
2882 2880
2883 2881
2884 2882 def tolf(s):
2885 2883 # type: (bytes) -> bytes
2886 2884 return _eolre.sub(b'\n', s)
2887 2885
2888 2886
2889 2887 def tocrlf(s):
2890 2888 # type: (bytes) -> bytes
2891 2889 return _eolre.sub(b'\r\n', s)
2892 2890
2893 2891
2894 2892 def _crlfwriter(fp):
2895 2893 return transformingwriter(fp, tocrlf)
2896 2894
2897 2895
2898 2896 if pycompat.oslinesep == b'\r\n':
2899 2897 tonativeeol = tocrlf
2900 2898 fromnativeeol = tolf
2901 2899 nativeeolwriter = _crlfwriter
2902 2900 else:
2903 2901 tonativeeol = pycompat.identity
2904 2902 fromnativeeol = pycompat.identity
2905 2903 nativeeolwriter = pycompat.identity
2906 2904
2907 2905
2908 2906 # TODO delete since the workaround variant for Python 2 is no longer needed.
2909 2907 def iterfile(fp):
2910 2908 return fp
2911 2909
2912 2910
2913 2911 def iterlines(iterator):
2914 2912 # type: (Iterator[bytes]) -> Iterator[bytes]
2915 2913 for chunk in iterator:
2916 2914 for line in chunk.splitlines():
2917 2915 yield line
2918 2916
2919 2917
2920 2918 def expandpath(path):
2921 2919 # type: (bytes) -> bytes
2922 2920 return os.path.expanduser(os.path.expandvars(path))
2923 2921
2924 2922
2925 2923 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2926 2924 """Return the result of interpolating items in the mapping into string s.
2927 2925
2928 2926 prefix is a single character string, or a two character string with
2929 2927 a backslash as the first character if the prefix needs to be escaped in
2930 2928 a regular expression.
2931 2929
2932 2930 fn is an optional function that will be applied to the replacement text
2933 2931 just before replacement.
2934 2932
2935 2933 escape_prefix is an optional flag that allows using doubled prefix for
2936 2934 its escaping.
2937 2935 """
2938 2936 fn = fn or (lambda s: s)
2939 2937 patterns = b'|'.join(mapping.keys())
2940 2938 if escape_prefix:
2941 2939 patterns += b'|' + prefix
2942 2940 if len(prefix) > 1:
2943 2941 prefix_char = prefix[1:]
2944 2942 else:
2945 2943 prefix_char = prefix
2946 2944 mapping[prefix_char] = prefix_char
2947 2945 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2948 2946 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2949 2947
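# Illustrative sketch (not part of the module): each occurrence of
# prefix+key is replaced by the mapped value:
#
#     >>> interpolate(b'%', {b'user': b'alice'}, b'hello %user')
#     b'hello alice'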
2950 2948
2951 2949 timecount = unitcountfn(
2952 2950 (1, 1e3, _(b'%.0f s')),
2953 2951 (100, 1, _(b'%.1f s')),
2954 2952 (10, 1, _(b'%.2f s')),
2955 2953 (1, 1, _(b'%.3f s')),
2956 2954 (100, 0.001, _(b'%.1f ms')),
2957 2955 (10, 0.001, _(b'%.2f ms')),
2958 2956 (1, 0.001, _(b'%.3f ms')),
2959 2957 (100, 0.000001, _(b'%.1f us')),
2960 2958 (10, 0.000001, _(b'%.2f us')),
2961 2959 (1, 0.000001, _(b'%.3f us')),
2962 2960 (100, 0.000000001, _(b'%.1f ns')),
2963 2961 (10, 0.000000001, _(b'%.2f ns')),
2964 2962 (1, 0.000000001, _(b'%.3f ns')),
2965 2963 )
2966 2964
2967 2965
2968 2966 @attr.s
2969 2967 class timedcmstats:
2970 2968 """Stats information produced by the timedcm context manager on entering."""
2971 2969
2972 2970 # the starting value of the timer as a float (meaning and resolution are
2973 2971 # platform dependent, see util.timer)
2974 2972 start = attr.ib(default=attr.Factory(lambda: timer()))
2975 2973 # the number of seconds as a floating point value; starts at 0, updated when
2976 2974 # the context is exited.
2977 2975 elapsed = attr.ib(default=0)
2978 2976 # the number of nested timedcm context managers.
2979 2977 level = attr.ib(default=1)
2980 2978
2981 2979 def __bytes__(self):
2982 2980 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2983 2981
2984 2982 __str__ = encoding.strmethod(__bytes__)
2985 2983
2986 2984
2987 2985 @contextlib.contextmanager
2988 2986 def timedcm(whencefmt, *whenceargs):
2989 2987 """A context manager that produces timing information for a given context.
2990 2988
2991 2989 On entering a timedcmstats instance is produced.
2992 2990
2993 2991 This context manager is reentrant.
2994 2992
2995 2993 """
2996 2994 # track nested context managers
2997 2995 timedcm._nested += 1
2998 2996 timing_stats = timedcmstats(level=timedcm._nested)
2999 2997 try:
3000 2998 with tracing.log(whencefmt, *whenceargs):
3001 2999 yield timing_stats
3002 3000 finally:
3003 3001 timing_stats.elapsed = timer() - timing_stats.start
3004 3002 timedcm._nested -= 1
3005 3003
3006 3004
3007 3005 timedcm._nested = 0
3008 3006
3009 3007
3010 3008 def timed(func):
3011 3009 """Report the execution time of a function call to stderr.
3012 3010
3013 3011 During development, use as a decorator when you need to measure
3014 3012 the cost of a function, e.g. as follows:
3015 3013
3016 3014 @util.timed
3017 3015 def foo(a, b, c):
3018 3016 pass
3019 3017 """
3020 3018
3021 3019 def wrapper(*args, **kwargs):
3022 3020 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3023 3021 result = func(*args, **kwargs)
3024 3022 stderr = procutil.stderr
3025 3023 stderr.write(
3026 3024 b'%s%s: %s\n'
3027 3025 % (
3028 3026 b' ' * time_stats.level * 2,
3029 3027 pycompat.bytestr(func.__name__),
3030 3028 time_stats,
3031 3029 )
3032 3030 )
3033 3031 return result
3034 3032
3035 3033 return wrapper
3036 3034
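# Illustrative sketch (not part of the module): timedcm yields a stats
# object whose `elapsed` field is filled in when the context exits:
#
#     with timedcm(b'demo') as stats:
#         sum(range(1000))
#     # bytes(stats) now renders the elapsed time, e.g. b'0.012 ms'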
3037 3035
3038 3036 _sizeunits = (
3039 3037 (b'm', 2 ** 20),
3040 3038 (b'k', 2 ** 10),
3041 3039 (b'g', 2 ** 30),
3042 3040 (b'kb', 2 ** 10),
3043 3041 (b'mb', 2 ** 20),
3044 3042 (b'gb', 2 ** 30),
3045 3043 (b'b', 1),
3046 3044 )
3047 3045
3048 3046
3049 3047 def sizetoint(s):
3050 3048 # type: (bytes) -> int
3051 3049 """Convert a space specifier to a byte count.
3052 3050
3053 3051 >>> sizetoint(b'30')
3054 3052 30
3055 3053 >>> sizetoint(b'2.2kb')
3056 3054 2252
3057 3055 >>> sizetoint(b'6M')
3058 3056 6291456
3059 3057 """
3060 3058 t = s.strip().lower()
3061 3059 try:
3062 3060 for k, u in _sizeunits:
3063 3061 if t.endswith(k):
3064 3062 return int(float(t[: -len(k)]) * u)
3065 3063 return int(t)
3066 3064 except ValueError:
3067 3065 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3068 3066
3069 3067
3070 3068 class hooks:
3071 3069 """A collection of hook functions that can be used to extend a
3072 3070 function's behavior. Hooks are called in lexicographic order,
3073 3071 based on the names of their sources."""
3074 3072
3075 3073 def __init__(self):
3076 3074 self._hooks = []
3077 3075
3078 3076 def add(self, source, hook):
3079 3077 self._hooks.append((source, hook))
3080 3078
3081 3079 def __call__(self, *args):
3082 3080 self._hooks.sort(key=lambda x: x[0])
3083 3081 results = []
3084 3082 for source, hook in self._hooks:
3085 3083 results.append(hook(*args))
3086 3084 return results
3087 3085
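# Illustrative sketch (not part of the module): hooks run sorted by
# source name, and the call returns each hook's result in that order:
#
#     >>> h = hooks()
#     >>> h.add(b'b', lambda x: x * 2)
#     >>> h.add(b'a', lambda x: x + 1)
#     >>> h(3)
#     [4, 6]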
3088 3086
3089 3087 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3090 3088 """Yields lines for a nicely formatted stacktrace.
3091 3089 Skips the 'skip' last entries, then return the last 'depth' entries.
3092 3090 Each file+linenumber is formatted according to fileline.
3093 3091 Each line is formatted according to line.
3094 3092 If line is None, it yields:
3095 3093 length of longest filepath+line number,
3096 3094 filepath+linenumber,
3097 3095 function
3098 3096
3099 3097 Not to be used in production code, but very convenient while developing.
3100 3098 """
3101 3099 entries = [
3102 3100 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3103 3101 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3104 3102 ][-depth:]
3105 3103 if entries:
3106 3104 fnmax = max(len(entry[0]) for entry in entries)
3107 3105 for fnln, func in entries:
3108 3106 if line is None:
3109 3107 yield (fnmax, fnln, func)
3110 3108 else:
3111 3109 yield line % (fnmax, fnln, func)
3112 3110
3113 3111
3114 3112 def debugstacktrace(
3115 3113 msg=b'stacktrace',
3116 3114 skip=0,
3117 3115 f=procutil.stderr,
3118 3116 otherf=procutil.stdout,
3119 3117 depth=0,
3120 3118 prefix=b'',
3121 3119 ):
3122 3120 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3123 3121 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3124 3122 By default it will flush stdout first.
3125 3123 It can be used everywhere and intentionally does not require an ui object.
3126 3124 Not to be used in production code, but very convenient while developing.
3127 3125 """
3128 3126 if otherf:
3129 3127 otherf.flush()
3130 3128 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3131 3129 for line in getstackframes(skip + 1, depth=depth):
3132 3130 f.write(prefix + line)
3133 3131 f.flush()
3134 3132
3135 3133
3136 3134 # convenient shortcut
3137 3135 dst = debugstacktrace
3138 3136
3139 3137
3140 3138 def safename(f, tag, ctx, others=None):
3141 3139 """
3142 3140 Generate a name that is safe to rename f to in the given context.
3143 3141
3144 3142 f: filename to rename
3145 3143 tag: a string tag that will be included in the new name
3146 3144 ctx: a context, in which the new name must not exist
3147 3145 others: a set of other filenames that the new name must not be in
3148 3146
3149 3147 Returns a file name of the form oldname~tag[~number] which does not exist
3150 3148 in the provided context and is not in the set of other names.
3151 3149 """
3152 3150 if others is None:
3153 3151 others = set()
3154 3152
3155 3153 fn = b'%s~%s' % (f, tag)
3156 3154 if fn not in ctx and fn not in others:
3157 3155 return fn
3158 3156 for n in itertools.count(1):
3159 3157 fn = b'%s~%s~%s' % (f, tag, n)
3160 3158 if fn not in ctx and fn not in others:
3161 3159 return fn
3162 3160
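# Illustrative sketch (not part of the module): plain sets stand in for
# the context and `others` here, since only `in` checks are performed:
#
#     >>> safename(b'f', b'orig', {b'f~orig'}, others={b'f~orig~1'})
#     b'f~orig~2'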
3163 3161
3164 3162 def readexactly(stream, n):
3165 3163 '''read n bytes from stream.read and abort if less was available'''
3166 3164 s = stream.read(n)
3167 3165 if len(s) < n:
3168 3166 raise error.Abort(
3169 3167 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3170 3168 % (len(s), n)
3171 3169 )
3172 3170 return s
3173 3171
3174 3172
3175 3173 def uvarintencode(value):
3176 3174 """Encode an unsigned integer value to a varint.
3177 3175
3178 3176 A varint is a variable length integer of 1 or more bytes. Each byte
3179 3177 except the last has the most significant bit set. The lower 7 bits of
3180 3178 each byte store the 2's complement representation, least significant group
3181 3179 first.
3182 3180
3183 3181 >>> uvarintencode(0)
3184 3182 '\\x00'
3185 3183 >>> uvarintencode(1)
3186 3184 '\\x01'
3187 3185 >>> uvarintencode(127)
3188 3186 '\\x7f'
3189 3187 >>> uvarintencode(1337)
3190 3188 '\\xb9\\n'
3191 3189 >>> uvarintencode(65536)
3192 3190 '\\x80\\x80\\x04'
3193 3191 >>> uvarintencode(-1)
3194 3192 Traceback (most recent call last):
3195 3193 ...
3196 3194 ProgrammingError: negative value for uvarint: -1
3197 3195 """
3198 3196 if value < 0:
3199 3197 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3200 3198 bits = value & 0x7F
3201 3199 value >>= 7
3202 3200 bytes = []
3203 3201 while value:
3204 3202 bytes.append(pycompat.bytechr(0x80 | bits))
3205 3203 bits = value & 0x7F
3206 3204 value >>= 7
3207 3205 bytes.append(pycompat.bytechr(bits))
3208 3206
3209 3207 return b''.join(bytes)
3210 3208
3211 3209
3212 3210 def uvarintdecodestream(fh):
3213 3211 """Decode an unsigned variable length integer from a stream.
3214 3212
3215 3213 The passed argument is anything that has a ``.read(N)`` method.
3216 3214
3217 3215 >>> from io import BytesIO
3221 3219 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3222 3220 0
3223 3221 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3224 3222 1
3225 3223 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3226 3224 127
3227 3225 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3228 3226 1337
3229 3227 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3230 3228 65536
3231 3229 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3232 3230 Traceback (most recent call last):
3233 3231 ...
3234 3232 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3235 3233 """
3236 3234 result = 0
3237 3235 shift = 0
3238 3236 while True:
3239 3237 byte = ord(readexactly(fh, 1))
3240 3238 result |= (byte & 0x7F) << shift
3241 3239 if not (byte & 0x80):
3242 3240 return result
3243 3241 shift += 7
3244 3242
3245 3243
3246 3244 # Passing the '' locale means that the locale should be set according to the
3247 3245 # user settings (environment variables).
3248 3246 # Python sometimes avoids setting the global locale settings. When interfacing
3249 3247 # with C code (e.g. the curses module or the Subversion bindings), the global
3250 3248 # locale settings must be initialized correctly. Python 2 does not initialize
3251 3249 # the global locale settings on interpreter startup. Python 3 sometimes
3252 3250 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3253 3251 # explicitly initialize it to get consistent behavior if it's not already
3254 3252 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3255 3253 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3256 3254 # if we can remove this code.
3257 3255 @contextlib.contextmanager
3258 3256 def with_lc_ctype():
3259 3257 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3260 3258 if oldloc == 'C':
3261 3259 try:
3262 3260 try:
3263 3261 locale.setlocale(locale.LC_CTYPE, '')
3264 3262 except locale.Error:
3265 3263 # The likely case is that the locale from the environment
3266 3264 # variables is unknown.
3267 3265 pass
3268 3266 yield
3269 3267 finally:
3270 3268 locale.setlocale(locale.LC_CTYPE, oldloc)
3271 3269 else:
3272 3270 yield
3273 3271
3274 3272
3275 3273 def _estimatememory():
3276 3274 # type: () -> Optional[int]
3277 3275 """Provide an estimate for the available system memory in Bytes.
3278 3276
3279 3277 If no estimate can be provided on the platform, returns None.
3280 3278 """
3281 3279 if pycompat.sysplatform.startswith(b'win'):
3282 3280 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3283 3281 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3284 3282 from ctypes import ( # pytype: disable=import-error
3285 3283 Structure,
3286 3284 byref,
3287 3285 sizeof,
3288 3286 windll,
3289 3287 )
3290 3288
3291 3289 class MEMORYSTATUSEX(Structure):
3292 3290 _fields_ = [
3293 3291 ('dwLength', DWORD),
3294 3292 ('dwMemoryLoad', DWORD),
3295 3293 ('ullTotalPhys', DWORDLONG),
3296 3294 ('ullAvailPhys', DWORDLONG),
3297 3295 ('ullTotalPageFile', DWORDLONG),
3298 3296 ('ullAvailPageFile', DWORDLONG),
3299 3297 ('ullTotalVirtual', DWORDLONG),
3300 3298 ('ullAvailVirtual', DWORDLONG),
3301 3299 ('ullExtendedVirtual', DWORDLONG),
3302 3300 ]
3303 3301
3304 3302 x = MEMORYSTATUSEX()
3305 3303 x.dwLength = sizeof(x)
3306 3304 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3307 3305 return x.ullAvailPhys
3308 3306
3309 3307 # On newer Unix-like systems and Mac OSX, the sysconf interface
3310 3308 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3311 3309 # seems to be implemented on most systems.
3312 3310 try:
3313 3311 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3314 3312 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3315 3313 return pagesize * pages
3316 3314 except OSError: # sysconf can fail
3317 3315 pass
3318 3316 except KeyError: # unknown parameter
3319 3317 pass