util: drop a duplicate import...
Matt Harbison
r49852:7ccf3dac default
@@ -1,3320 +1,3319 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16
17 17 import abc
18 18 import collections
19 19 import contextlib
20 20 import errno
21 21 import gc
22 22 import hashlib
23 23 import io
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import pickle # provides util.pickle symbol
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .node import hex
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 from .node import hex
46 45 from hgdemandimport import tracing
47 46 from . import (
48 47 encoding,
49 48 error,
50 49 i18n,
51 50 policy,
52 51 pycompat,
53 52 urllibcompat,
54 53 )
55 54 from .utils import (
56 55 compression,
57 56 hashutil,
58 57 procutil,
59 58 stringutil,
60 59 )
61 60
62 61 if pycompat.TYPE_CHECKING:
63 62 from typing import (
64 63 Iterator,
65 64 List,
66 65 Optional,
67 66 Tuple,
68 67 )
69 68
70 69
71 70 base85 = policy.importmod('base85')
72 71 osutil = policy.importmod('osutil')
73 72
74 73 b85decode = base85.b85decode
75 74 b85encode = base85.b85encode
76 75
77 76 cookielib = pycompat.cookielib
78 77 httplib = pycompat.httplib
79 78 safehasattr = pycompat.safehasattr
80 79 socketserver = pycompat.socketserver
81 80 bytesio = io.BytesIO
82 81 # TODO deprecate stringio name, as it is a lie on Python 3.
83 82 stringio = bytesio
84 83 xmlrpclib = pycompat.xmlrpclib
85 84
86 85 httpserver = urllibcompat.httpserver
87 86 urlerr = urllibcompat.urlerr
88 87 urlreq = urllibcompat.urlreq
89 88
90 89 # workaround for win32mbcs
91 90 _filenamebytestr = pycompat.bytestr
92 91
93 92 if pycompat.iswindows:
94 93 from . import windows as platform
95 94 else:
96 95 from . import posix as platform
97 96
98 97 _ = i18n._
99 98
100 99 abspath = platform.abspath
101 100 bindunixsocket = platform.bindunixsocket
102 101 cachestat = platform.cachestat
103 102 checkexec = platform.checkexec
104 103 checklink = platform.checklink
105 104 copymode = platform.copymode
106 105 expandglobs = platform.expandglobs
107 106 getfsmountpoint = platform.getfsmountpoint
108 107 getfstype = platform.getfstype
109 108 get_password = platform.get_password
110 109 groupmembers = platform.groupmembers
111 110 groupname = platform.groupname
112 111 isexec = platform.isexec
113 112 isowner = platform.isowner
114 113 listdir = osutil.listdir
115 114 localpath = platform.localpath
116 115 lookupreg = platform.lookupreg
117 116 makedir = platform.makedir
118 117 nlinks = platform.nlinks
119 118 normpath = platform.normpath
120 119 normcase = platform.normcase
121 120 normcasespec = platform.normcasespec
122 121 normcasefallback = platform.normcasefallback
123 122 openhardlinks = platform.openhardlinks
124 123 oslink = platform.oslink
125 124 parsepatchoutput = platform.parsepatchoutput
126 125 pconvert = platform.pconvert
127 126 poll = platform.poll
128 127 posixfile = platform.posixfile
129 128 readlink = platform.readlink
130 129 rename = platform.rename
131 130 removedirs = platform.removedirs
132 131 samedevice = platform.samedevice
133 132 samefile = platform.samefile
134 133 samestat = platform.samestat
135 134 setflags = platform.setflags
136 135 split = platform.split
137 136 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 137 statisexec = platform.statisexec
139 138 statislink = platform.statislink
140 139 umask = platform.umask
141 140 unlink = platform.unlink
142 141 username = platform.username
143 142
144 143
145 144 def setumask(val):
146 145 # type: (int) -> None
147 146 '''updates the umask. used by chg server'''
148 147 if pycompat.iswindows:
149 148 return
150 149 os.umask(val)
151 150 global umask
152 151 platform.umask = umask = val & 0o777
153 152
154 153
155 154 # small compat layer
156 155 compengines = compression.compengines
157 156 SERVERROLE = compression.SERVERROLE
158 157 CLIENTROLE = compression.CLIENTROLE
159 158
160 159 try:
161 160 recvfds = osutil.recvfds
162 161 except AttributeError:
163 162 pass
164 163
165 164 # Python compatibility
166 165
167 166 _notset = object()
168 167
169 168
170 169 def bitsfrom(container):
171 170 bits = 0
172 171 for bit in container:
173 172 bits |= bit
174 173 return bits
175 174
176 175
177 176 # python 2.6 still has deprecation warnings enabled by default. We do not want
178 177 # to display anything to standard users, so detect if we are running tests and
179 178 # only use python deprecation warnings in this case.
180 179 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
181 180 if _dowarn:
182 181 # explicitly unfilter our warning for python 2.7
183 182 #
184 183 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 184 # However, the module name set through PYTHONWARNINGS was matched exactly, so
186 185 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
187 186 # makes the whole PYTHONWARNINGS approach useless for our use case.
188 187 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
189 188 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
190 189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
191 190 if _dowarn:
192 191 # silence warning emitted by passing user string to re.sub()
193 192 warnings.filterwarnings(
194 193 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
195 194 )
196 195 warnings.filterwarnings(
197 196 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
198 197 )
199 198 # TODO: reinvent imp.is_frozen()
200 199 warnings.filterwarnings(
201 200 'ignore',
202 201 'the imp module is deprecated',
203 202 DeprecationWarning,
204 203 'mercurial',
205 204 )
206 205
207 206
208 207 def nouideprecwarn(msg, version, stacklevel=1):
209 208 """Issue an python native deprecation warning
210 209
211 210 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
212 211 """
213 212 if _dowarn:
214 213 msg += (
215 214 b"\n(compatibility will be dropped after Mercurial-%s,"
216 215 b" update your code.)"
217 216 ) % version
218 217 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
219 218 # on python 3 with chg, we will need to explicitly flush the output
220 219 sys.stderr.flush()
221 220
222 221
223 222 DIGESTS = {
224 223 b'md5': hashlib.md5,
225 224 b'sha1': hashutil.sha1,
226 225 b'sha512': hashlib.sha512,
227 226 }
228 227 # List of digest types from strongest to weakest
229 228 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
230 229
231 230 for k in DIGESTS_BY_STRENGTH:
232 231 assert k in DIGESTS
233 232
234 233
235 234 class digester:
236 235 """helper to compute digests.
237 236
238 237 This helper can be used to compute one or more digests given their name.
239 238
240 239 >>> d = digester([b'md5', b'sha1'])
241 240 >>> d.update(b'foo')
242 241 >>> [k for k in sorted(d)]
243 242 ['md5', 'sha1']
244 243 >>> d[b'md5']
245 244 'acbd18db4cc2f85cedef654fccc4a4d8'
246 245 >>> d[b'sha1']
247 246 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
248 247 >>> digester.preferred([b'md5', b'sha1'])
249 248 'sha1'
250 249 """
251 250
252 251 def __init__(self, digests, s=b''):
253 252 self._hashes = {}
254 253 for k in digests:
255 254 if k not in DIGESTS:
256 255 raise error.Abort(_(b'unknown digest type: %s') % k)
257 256 self._hashes[k] = DIGESTS[k]()
258 257 if s:
259 258 self.update(s)
260 259
261 260 def update(self, data):
262 261 for h in self._hashes.values():
263 262 h.update(data)
264 263
265 264 def __getitem__(self, key):
266 265 if key not in DIGESTS:
267 266 raise error.Abort(_(b'unknown digest type: %s') % key)
268 267 return hex(self._hashes[key].digest())
269 268
270 269 def __iter__(self):
271 270 return iter(self._hashes)
272 271
273 272 @staticmethod
274 273 def preferred(supported):
275 274 """returns the strongest digest type in both supported and DIGESTS."""
276 275
277 276 for k in DIGESTS_BY_STRENGTH:
278 277 if k in supported:
279 278 return k
280 279 return None
281 280
282 281
283 282 class digestchecker:
284 283 """file handle wrapper that additionally checks content against a given
285 284 size and digests.
286 285
287 286 d = digestchecker(fh, size, {'md5': '...'})
288 287
289 288 When multiple digests are given, all of them are validated.
290 289 """
291 290
292 291 def __init__(self, fh, size, digests):
293 292 self._fh = fh
294 293 self._size = size
295 294 self._got = 0
296 295 self._digests = dict(digests)
297 296 self._digester = digester(self._digests.keys())
298 297
299 298 def read(self, length=-1):
300 299 content = self._fh.read(length)
301 300 self._digester.update(content)
302 301 self._got += len(content)
303 302 return content
304 303
305 304 def validate(self):
306 305 if self._size != self._got:
307 306 raise error.Abort(
308 307 _(b'size mismatch: expected %d, got %d')
309 308 % (self._size, self._got)
310 309 )
311 310 for k, v in self._digests.items():
312 311 if v != self._digester[k]:
313 312 # i18n: first parameter is a digest name
314 313 raise error.Abort(
315 314 _(b'%s mismatch: expected %s, got %s')
316 315 % (k, v, self._digester[k])
317 316 )
318 317
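# A minimal digestchecker usage sketch (illustrative only; the file name,
# size and digest value below are hypothetical placeholders):
#
#   fh = open(b'bundle.bin', 'rb')
#   wrapped = digestchecker(fh, expected_size,
#                           {b'sha1': expected_sha1_hexdigest})
#   while wrapped.read(4096):
#       pass
#   wrapped.validate()  # raises error.Abort on size or digest mismatch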
319 318
320 319 try:
321 320 buffer = buffer # pytype: disable=name-error
322 321 except NameError:
323 322
324 323 def buffer(sliceable, offset=0, length=None):
325 324 if length is not None:
326 325 return memoryview(sliceable)[offset : offset + length]
327 326 return memoryview(sliceable)[offset:]
328 327
329 328
330 329 _chunksize = 4096
331 330
332 331
333 332 class bufferedinputpipe:
334 333 """a manually buffered input pipe
335 334
336 335 Python will not let us use buffered IO and lazy reading with 'polling' at
337 336 the same time. We cannot probe the buffer state and select will not detect
338 337 that data are ready to read if they are already buffered.
339 338
340 339 This class lets us work around that by implementing its own buffering
341 340 (allowing efficient readline) while offering a way to know if the buffer is
342 341 empty from the output (allowing collaboration of the buffer with polling).
343 342
344 343 This class lives in the 'util' module because it makes use of the 'os'
345 344 module from the python stdlib.
346 345 """
347 346
348 347 def __new__(cls, fh):
349 348 # If we receive a fileobjectproxy, we need to use a variation of this
350 349 # class that notifies observers about activity.
351 350 if isinstance(fh, fileobjectproxy):
352 351 cls = observedbufferedinputpipe
353 352
354 353 return super(bufferedinputpipe, cls).__new__(cls)
355 354
356 355 def __init__(self, input):
357 356 self._input = input
358 357 self._buffer = []
359 358 self._eof = False
360 359 self._lenbuf = 0
361 360
362 361 @property
363 362 def hasbuffer(self):
364 363 """True is any data is currently buffered
365 364
366 365 This will be used externally a pre-step for polling IO. If there is
367 366 already data then no polling should be set in place."""
368 367 return bool(self._buffer)
369 368
370 369 @property
371 370 def closed(self):
372 371 return self._input.closed
373 372
374 373 def fileno(self):
375 374 return self._input.fileno()
376 375
377 376 def close(self):
378 377 return self._input.close()
379 378
380 379 def read(self, size):
381 380 while (not self._eof) and (self._lenbuf < size):
382 381 self._fillbuffer()
383 382 return self._frombuffer(size)
384 383
385 384 def unbufferedread(self, size):
386 385 if not self._eof and self._lenbuf == 0:
387 386 self._fillbuffer(max(size, _chunksize))
388 387 return self._frombuffer(min(self._lenbuf, size))
389 388
390 389 def readline(self, *args, **kwargs):
391 390 if len(self._buffer) > 1:
392 391 # this should not happen because both read and readline end with a
393 392 # _frombuffer call that collapses it.
394 393 self._buffer = [b''.join(self._buffer)]
395 394 self._lenbuf = len(self._buffer[0])
396 395 lfi = -1
397 396 if self._buffer:
398 397 lfi = self._buffer[-1].find(b'\n')
399 398 while (not self._eof) and lfi < 0:
400 399 self._fillbuffer()
401 400 if self._buffer:
402 401 lfi = self._buffer[-1].find(b'\n')
403 402 size = lfi + 1
404 403 if lfi < 0: # end of file
405 404 size = self._lenbuf
406 405 elif len(self._buffer) > 1:
407 406 # we need to take previous chunks into account
408 407 size += self._lenbuf - len(self._buffer[-1])
409 408 return self._frombuffer(size)
410 409
411 410 def _frombuffer(self, size):
412 411 """return at most 'size' data from the buffer
413 412
414 413 The data are removed from the buffer."""
415 414 if size == 0 or not self._buffer:
416 415 return b''
417 416 buf = self._buffer[0]
418 417 if len(self._buffer) > 1:
419 418 buf = b''.join(self._buffer)
420 419
421 420 data = buf[:size]
422 421 buf = buf[len(data) :]
423 422 if buf:
424 423 self._buffer = [buf]
425 424 self._lenbuf = len(buf)
426 425 else:
427 426 self._buffer = []
428 427 self._lenbuf = 0
429 428 return data
430 429
431 430 def _fillbuffer(self, size=_chunksize):
432 431 """read data to the buffer"""
433 432 data = os.read(self._input.fileno(), size)
434 433 if not data:
435 434 self._eof = True
436 435 else:
437 436 self._lenbuf += len(data)
438 437 self._buffer.append(data)
439 438
440 439 return data
441 440
442 441
443 442 def mmapread(fp, size=None):
444 443 if size == 0:
445 444 # size of 0 to mmap.mmap() means "all data"
446 445 # rather than "zero bytes", so special case that.
447 446 return b''
448 447 elif size is None:
449 448 size = 0
450 449 fd = getattr(fp, 'fileno', lambda: fp)()
451 450 try:
452 451 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
453 452 except ValueError:
454 453 # Empty files cannot be mmapped, but mmapread should still work. Check
455 454 # if the file is empty, and if so, return an empty buffer.
456 455 if os.fstat(fd).st_size == 0:
457 456 return b''
458 457 raise
459 458
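# mmapread usage sketch (the file name is hypothetical):
#
#   with open(b'store.dat', 'rb') as fp:
#       everything = mmapread(fp)        # size=None maps the whole file
#       first_kb = mmapread(fp, 1024)    # maps only the first 1024 bytes
#       nothing = mmapread(fp, 0)        # b'', special-cased above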
460 459
461 460 class fileobjectproxy:
462 461 """A proxy around file objects that tells a watcher when events occur.
463 462
464 463 This type is intended to only be used for testing purposes. Think hard
465 464 before using it in important code.
466 465 """
467 466
468 467 __slots__ = (
469 468 '_orig',
470 469 '_observer',
471 470 )
472 471
473 472 def __init__(self, fh, observer):
474 473 object.__setattr__(self, '_orig', fh)
475 474 object.__setattr__(self, '_observer', observer)
476 475
477 476 def __getattribute__(self, name):
478 477 ours = {
479 478 '_observer',
480 479 # IOBase
481 480 'close',
483 482 # closed is a property
483 482 'fileno',
484 483 'flush',
485 484 'isatty',
486 485 'readable',
487 486 'readline',
488 487 'readlines',
489 488 'seek',
490 489 'seekable',
491 490 'tell',
492 491 'truncate',
493 492 'writable',
494 493 'writelines',
495 494 # RawIOBase
496 495 'read',
497 496 'readall',
498 497 'readinto',
499 498 'write',
500 499 # BufferedIOBase
501 500 # raw is a property
502 501 'detach',
503 502 # read defined above
504 503 'read1',
505 504 # readinto defined above
506 505 # write defined above
507 506 }
508 507
509 508 # We only observe some methods.
510 509 if name in ours:
511 510 return object.__getattribute__(self, name)
512 511
513 512 return getattr(object.__getattribute__(self, '_orig'), name)
514 513
515 514 def __nonzero__(self):
516 515 return bool(object.__getattribute__(self, '_orig'))
517 516
518 517 __bool__ = __nonzero__
519 518
520 519 def __delattr__(self, name):
521 520 return delattr(object.__getattribute__(self, '_orig'), name)
522 521
523 522 def __setattr__(self, name, value):
524 523 return setattr(object.__getattribute__(self, '_orig'), name, value)
525 524
526 525 def __iter__(self):
527 526 return object.__getattribute__(self, '_orig').__iter__()
528 527
529 528 def _observedcall(self, name, *args, **kwargs):
530 529 # Call the original object.
531 530 orig = object.__getattribute__(self, '_orig')
532 531 res = getattr(orig, name)(*args, **kwargs)
533 532
534 533 # Call a method on the observer of the same name with arguments
535 534 # so it can react, log, etc.
536 535 observer = object.__getattribute__(self, '_observer')
537 536 fn = getattr(observer, name, None)
538 537 if fn:
539 538 fn(res, *args, **kwargs)
540 539
541 540 return res
542 541
543 542 def close(self, *args, **kwargs):
544 543 return object.__getattribute__(self, '_observedcall')(
545 544 'close', *args, **kwargs
546 545 )
547 546
548 547 def fileno(self, *args, **kwargs):
549 548 return object.__getattribute__(self, '_observedcall')(
550 549 'fileno', *args, **kwargs
551 550 )
552 551
553 552 def flush(self, *args, **kwargs):
554 553 return object.__getattribute__(self, '_observedcall')(
555 554 'flush', *args, **kwargs
556 555 )
557 556
558 557 def isatty(self, *args, **kwargs):
559 558 return object.__getattribute__(self, '_observedcall')(
560 559 'isatty', *args, **kwargs
561 560 )
562 561
563 562 def readable(self, *args, **kwargs):
564 563 return object.__getattribute__(self, '_observedcall')(
565 564 'readable', *args, **kwargs
566 565 )
567 566
568 567 def readline(self, *args, **kwargs):
569 568 return object.__getattribute__(self, '_observedcall')(
570 569 'readline', *args, **kwargs
571 570 )
572 571
573 572 def readlines(self, *args, **kwargs):
574 573 return object.__getattribute__(self, '_observedcall')(
575 574 'readlines', *args, **kwargs
576 575 )
577 576
578 577 def seek(self, *args, **kwargs):
579 578 return object.__getattribute__(self, '_observedcall')(
580 579 'seek', *args, **kwargs
581 580 )
582 581
583 582 def seekable(self, *args, **kwargs):
584 583 return object.__getattribute__(self, '_observedcall')(
585 584 'seekable', *args, **kwargs
586 585 )
587 586
588 587 def tell(self, *args, **kwargs):
589 588 return object.__getattribute__(self, '_observedcall')(
590 589 'tell', *args, **kwargs
591 590 )
592 591
593 592 def truncate(self, *args, **kwargs):
594 593 return object.__getattribute__(self, '_observedcall')(
595 594 'truncate', *args, **kwargs
596 595 )
597 596
598 597 def writable(self, *args, **kwargs):
599 598 return object.__getattribute__(self, '_observedcall')(
600 599 'writable', *args, **kwargs
601 600 )
602 601
603 602 def writelines(self, *args, **kwargs):
604 603 return object.__getattribute__(self, '_observedcall')(
605 604 'writelines', *args, **kwargs
606 605 )
607 606
608 607 def read(self, *args, **kwargs):
609 608 return object.__getattribute__(self, '_observedcall')(
610 609 'read', *args, **kwargs
611 610 )
612 611
613 612 def readall(self, *args, **kwargs):
614 613 return object.__getattribute__(self, '_observedcall')(
615 614 'readall', *args, **kwargs
616 615 )
617 616
618 617 def readinto(self, *args, **kwargs):
619 618 return object.__getattribute__(self, '_observedcall')(
620 619 'readinto', *args, **kwargs
621 620 )
622 621
623 622 def write(self, *args, **kwargs):
624 623 return object.__getattribute__(self, '_observedcall')(
625 624 'write', *args, **kwargs
626 625 )
627 626
628 627 def detach(self, *args, **kwargs):
629 628 return object.__getattribute__(self, '_observedcall')(
630 629 'detach', *args, **kwargs
631 630 )
632 631
633 632 def read1(self, *args, **kwargs):
634 633 return object.__getattribute__(self, '_observedcall')(
635 634 'read1', *args, **kwargs
636 635 )
637 636
638 637
639 638 class observedbufferedinputpipe(bufferedinputpipe):
640 639 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
641 640
642 641 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
643 642 bypass ``fileobjectproxy``. Because of this, we need to make
644 643 ``bufferedinputpipe`` aware of these operations.
645 644
646 645 This variation of ``bufferedinputpipe`` can notify observers about
647 646 ``os.read()`` events. It also re-publishes other events, such as
648 647 ``read()`` and ``readline()``.
649 648 """
650 649
651 650 def _fillbuffer(self):
652 651 res = super(observedbufferedinputpipe, self)._fillbuffer()
653 652
654 653 fn = getattr(self._input._observer, 'osread', None)
655 654 if fn:
656 655 fn(res, _chunksize)
657 656
658 657 return res
659 658
660 659 # We use different observer methods because the operation isn't
661 660 # performed on the actual file object but on us.
662 661 def read(self, size):
663 662 res = super(observedbufferedinputpipe, self).read(size)
664 663
665 664 fn = getattr(self._input._observer, 'bufferedread', None)
666 665 if fn:
667 666 fn(res, size)
668 667
669 668 return res
670 669
671 670 def readline(self, *args, **kwargs):
672 671 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
673 672
674 673 fn = getattr(self._input._observer, 'bufferedreadline', None)
675 674 if fn:
676 675 fn(res)
677 676
678 677 return res
679 678
680 679
681 680 PROXIED_SOCKET_METHODS = {
682 681 'makefile',
683 682 'recv',
684 683 'recvfrom',
685 684 'recvfrom_into',
686 685 'recv_into',
687 686 'send',
688 687 'sendall',
689 688 'sendto',
690 689 'setblocking',
691 690 'settimeout',
692 691 'gettimeout',
693 692 'setsockopt',
694 693 }
695 694
696 695
697 696 class socketproxy:
698 697 """A proxy around a socket that tells a watcher when events occur.
699 698
700 699 This is like ``fileobjectproxy`` except for sockets.
701 700
702 701 This type is intended to only be used for testing purposes. Think hard
703 702 before using it in important code.
704 703 """
705 704
706 705 __slots__ = (
707 706 '_orig',
708 707 '_observer',
709 708 )
710 709
711 710 def __init__(self, sock, observer):
712 711 object.__setattr__(self, '_orig', sock)
713 712 object.__setattr__(self, '_observer', observer)
714 713
715 714 def __getattribute__(self, name):
716 715 if name in PROXIED_SOCKET_METHODS:
717 716 return object.__getattribute__(self, name)
718 717
719 718 return getattr(object.__getattribute__(self, '_orig'), name)
720 719
721 720 def __delattr__(self, name):
722 721 return delattr(object.__getattribute__(self, '_orig'), name)
723 722
724 723 def __setattr__(self, name, value):
725 724 return setattr(object.__getattribute__(self, '_orig'), name, value)
726 725
727 726 def __nonzero__(self):
728 727 return bool(object.__getattribute__(self, '_orig'))
729 728
730 729 __bool__ = __nonzero__
731 730
732 731 def _observedcall(self, name, *args, **kwargs):
733 732 # Call the original object.
734 733 orig = object.__getattribute__(self, '_orig')
735 734 res = getattr(orig, name)(*args, **kwargs)
736 735
737 736 # Call a method on the observer of the same name with arguments
738 737 # so it can react, log, etc.
739 738 observer = object.__getattribute__(self, '_observer')
740 739 fn = getattr(observer, name, None)
741 740 if fn:
742 741 fn(res, *args, **kwargs)
743 742
744 743 return res
745 744
746 745 def makefile(self, *args, **kwargs):
747 746 res = object.__getattribute__(self, '_observedcall')(
748 747 'makefile', *args, **kwargs
749 748 )
750 749
751 750 # The file object may be used for I/O. So we turn it into a
752 751 # proxy using our observer.
753 752 observer = object.__getattribute__(self, '_observer')
754 753 return makeloggingfileobject(
755 754 observer.fh,
756 755 res,
757 756 observer.name,
758 757 reads=observer.reads,
759 758 writes=observer.writes,
760 759 logdata=observer.logdata,
761 760 logdataapis=observer.logdataapis,
762 761 )
763 762
764 763 def recv(self, *args, **kwargs):
765 764 return object.__getattribute__(self, '_observedcall')(
766 765 'recv', *args, **kwargs
767 766 )
768 767
769 768 def recvfrom(self, *args, **kwargs):
770 769 return object.__getattribute__(self, '_observedcall')(
771 770 'recvfrom', *args, **kwargs
772 771 )
773 772
774 773 def recvfrom_into(self, *args, **kwargs):
775 774 return object.__getattribute__(self, '_observedcall')(
776 775 'recvfrom_into', *args, **kwargs
777 776 )
778 777
779 778 def recv_into(self, *args, **kwargs):
780 779 return object.__getattribute__(self, '_observedcall')(
781 780 'recv_into', *args, **kwargs
782 781 )
783 782
784 783 def send(self, *args, **kwargs):
785 784 return object.__getattribute__(self, '_observedcall')(
786 785 'send', *args, **kwargs
787 786 )
788 787
789 788 def sendall(self, *args, **kwargs):
790 789 return object.__getattribute__(self, '_observedcall')(
791 790 'sendall', *args, **kwargs
792 791 )
793 792
794 793 def sendto(self, *args, **kwargs):
795 794 return object.__getattribute__(self, '_observedcall')(
796 795 'sendto', *args, **kwargs
797 796 )
798 797
799 798 def setblocking(self, *args, **kwargs):
800 799 return object.__getattribute__(self, '_observedcall')(
801 800 'setblocking', *args, **kwargs
802 801 )
803 802
804 803 def settimeout(self, *args, **kwargs):
805 804 return object.__getattribute__(self, '_observedcall')(
806 805 'settimeout', *args, **kwargs
807 806 )
808 807
809 808 def gettimeout(self, *args, **kwargs):
810 809 return object.__getattribute__(self, '_observedcall')(
811 810 'gettimeout', *args, **kwargs
812 811 )
813 812
814 813 def setsockopt(self, *args, **kwargs):
815 814 return object.__getattribute__(self, '_observedcall')(
816 815 'setsockopt', *args, **kwargs
817 816 )
818 817
819 818
820 819 class baseproxyobserver:
821 820 def __init__(self, fh, name, logdata, logdataapis):
822 821 self.fh = fh
823 822 self.name = name
824 823 self.logdata = logdata
825 824 self.logdataapis = logdataapis
826 825
827 826 def _writedata(self, data):
828 827 if not self.logdata:
829 828 if self.logdataapis:
830 829 self.fh.write(b'\n')
831 830 self.fh.flush()
832 831 return
833 832
834 833 # Simple case writes all data on a single line.
835 834 if b'\n' not in data:
836 835 if self.logdataapis:
837 836 self.fh.write(b': %s\n' % stringutil.escapestr(data))
838 837 else:
839 838 self.fh.write(
840 839 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
841 840 )
842 841 self.fh.flush()
843 842 return
844 843
845 844 # Data with newlines is written to multiple lines.
846 845 if self.logdataapis:
847 846 self.fh.write(b':\n')
848 847
849 848 lines = data.splitlines(True)
850 849 for line in lines:
851 850 self.fh.write(
852 851 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
853 852 )
854 853 self.fh.flush()
855 854
856 855
857 856 class fileobjectobserver(baseproxyobserver):
858 857 """Logs file object activity."""
859 858
860 859 def __init__(
861 860 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
862 861 ):
863 862 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
864 863 self.reads = reads
865 864 self.writes = writes
866 865
867 866 def read(self, res, size=-1):
868 867 if not self.reads:
869 868 return
870 869 # Python 3 can return None from reads at EOF instead of empty strings.
871 870 if res is None:
872 871 res = b''
873 872
874 873 if size == -1 and res == b'':
875 874 # Suppress pointless read(-1) calls that return
876 875 # nothing. These happen _a lot_ on Python 3, and there
877 876 # doesn't seem to be a better workaround to have matching
878 877 # Python 2 and 3 behavior. :(
879 878 return
880 879
881 880 if self.logdataapis:
882 881 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
883 882
884 883 self._writedata(res)
885 884
886 885 def readline(self, res, limit=-1):
887 886 if not self.reads:
888 887 return
889 888
890 889 if self.logdataapis:
891 890 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
892 891
893 892 self._writedata(res)
894 893
895 894 def readinto(self, res, dest):
896 895 if not self.reads:
897 896 return
898 897
899 898 if self.logdataapis:
900 899 self.fh.write(
901 900 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
902 901 )
903 902
904 903 data = dest[0:res] if res is not None else b''
905 904
906 905 # _writedata() uses "in" operator and is confused by memoryview because
907 906 # characters are ints on Python 3.
908 907 if isinstance(data, memoryview):
909 908 data = data.tobytes()
910 909
911 910 self._writedata(data)
912 911
913 912 def write(self, res, data):
914 913 if not self.writes:
915 914 return
916 915
917 916 # Python 2 returns None from some write() calls. Python 3 (reasonably)
918 917 # returns the integer bytes written.
919 918 if res is None and data:
920 919 res = len(data)
921 920
922 921 if self.logdataapis:
923 922 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
924 923
925 924 self._writedata(data)
926 925
927 926 def flush(self, res):
928 927 if not self.writes:
929 928 return
930 929
931 930 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
932 931
933 932 # For observedbufferedinputpipe.
934 933 def bufferedread(self, res, size):
935 934 if not self.reads:
936 935 return
937 936
938 937 if self.logdataapis:
939 938 self.fh.write(
940 939 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
941 940 )
942 941
943 942 self._writedata(res)
944 943
945 944 def bufferedreadline(self, res):
946 945 if not self.reads:
947 946 return
948 947
949 948 if self.logdataapis:
950 949 self.fh.write(
951 950 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
952 951 )
953 952
954 953 self._writedata(res)
955 954
956 955
957 956 def makeloggingfileobject(
958 957 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
959 958 ):
960 959 """Turn a file object into a logging file object."""
961 960
962 961 observer = fileobjectobserver(
963 962 logh,
964 963 name,
965 964 reads=reads,
966 965 writes=writes,
967 966 logdata=logdata,
968 967 logdataapis=logdataapis,
969 968 )
970 969 return fileobjectproxy(fh, observer)
971 970
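# Sketch of how the logging proxy is typically wired up in tests (names
# are illustrative):
#
#   logfh = open(b'io.log', 'wb')
#   fh = makeloggingfileobject(logfh, open(b'data.bin', 'rb'), b'data',
#                              logdata=True)
#   fh.read(10)  # forwarded to the real file object, and a line like
#                # 'data> read(10) -> 10: ...' is written to io.log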
972 971
973 972 class socketobserver(baseproxyobserver):
974 973 """Logs socket activity."""
975 974
976 975 def __init__(
977 976 self,
978 977 fh,
979 978 name,
980 979 reads=True,
981 980 writes=True,
982 981 states=True,
983 982 logdata=False,
984 983 logdataapis=True,
985 984 ):
986 985 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
987 986 self.reads = reads
988 987 self.writes = writes
989 988 self.states = states
990 989
991 990 def makefile(self, res, mode=None, bufsize=None):
992 991 if not self.states:
993 992 return
994 993
995 994 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
996 995
997 996 def recv(self, res, size, flags=0):
998 997 if not self.reads:
999 998 return
1000 999
1001 1000 if self.logdataapis:
1002 1001 self.fh.write(
1003 1002 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1004 1003 )
1005 1004 self._writedata(res)
1006 1005
1007 1006 def recvfrom(self, res, size, flags=0):
1008 1007 if not self.reads:
1009 1008 return
1010 1009
1011 1010 if self.logdataapis:
1012 1011 self.fh.write(
1013 1012 b'%s> recvfrom(%d, %d) -> %d'
1014 1013 % (self.name, size, flags, len(res[0]))
1015 1014 )
1016 1015
1017 1016 self._writedata(res[0])
1018 1017
1019 1018 def recvfrom_into(self, res, buf, size, flags=0):
1020 1019 if not self.reads:
1021 1020 return
1022 1021
1023 1022 if self.logdataapis:
1024 1023 self.fh.write(
1025 1024 b'%s> recvfrom_into(%d, %d) -> %d'
1026 1025 % (self.name, size, flags, res[0])
1027 1026 )
1028 1027
1029 1028 self._writedata(buf[0 : res[0]])
1030 1029
1031 1030 def recv_into(self, res, buf, size=0, flags=0):
1032 1031 if not self.reads:
1033 1032 return
1034 1033
1035 1034 if self.logdataapis:
1036 1035 self.fh.write(
1037 1036 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1038 1037 )
1039 1038
1040 1039 self._writedata(buf[0:res])
1041 1040
1042 1041 def send(self, res, data, flags=0):
1043 1042 if not self.writes:
1044 1043 return
1045 1044
1046 1045 self.fh.write(
1047 1046 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1048 1047 )
1049 1048 self._writedata(data)
1050 1049
1051 1050 def sendall(self, res, data, flags=0):
1052 1051 if not self.writes:
1053 1052 return
1054 1053
1055 1054 if self.logdataapis:
1056 1055 # Returns None on success. So don't bother reporting return value.
1057 1056 self.fh.write(
1058 1057 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1059 1058 )
1060 1059
1061 1060 self._writedata(data)
1062 1061
1063 1062 def sendto(self, res, data, flagsoraddress, address=None):
1064 1063 if not self.writes:
1065 1064 return
1066 1065
1067 1066 if address:
1068 1067 flags = flagsoraddress
1069 1068 else:
1070 1069 flags = 0
1071 1070
1072 1071 if self.logdataapis:
1073 1072 self.fh.write(
1074 1073 b'%s> sendto(%d, %d, %r) -> %d'
1075 1074 % (self.name, len(data), flags, address, res)
1076 1075 )
1077 1076
1078 1077 self._writedata(data)
1079 1078
1080 1079 def setblocking(self, res, flag):
1081 1080 if not self.states:
1082 1081 return
1083 1082
1084 1083 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1085 1084
1086 1085 def settimeout(self, res, value):
1087 1086 if not self.states:
1088 1087 return
1089 1088
1090 1089 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1091 1090
1092 1091 def gettimeout(self, res):
1093 1092 if not self.states:
1094 1093 return
1095 1094
1096 1095 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1097 1096
1098 1097 def setsockopt(self, res, level, optname, value):
1099 1098 if not self.states:
1100 1099 return
1101 1100
1102 1101 self.fh.write(
1103 1102 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1104 1103 % (self.name, level, optname, value, res)
1105 1104 )
1106 1105
1107 1106
1108 1107 def makeloggingsocket(
1109 1108 logh,
1110 1109 fh,
1111 1110 name,
1112 1111 reads=True,
1113 1112 writes=True,
1114 1113 states=True,
1115 1114 logdata=False,
1116 1115 logdataapis=True,
1117 1116 ):
1118 1117 """Turn a socket into a logging socket."""
1119 1118
1120 1119 observer = socketobserver(
1121 1120 logh,
1122 1121 name,
1123 1122 reads=reads,
1124 1123 writes=writes,
1125 1124 states=states,
1126 1125 logdata=logdata,
1127 1126 logdataapis=logdataapis,
1128 1127 )
1129 1128 return socketproxy(fh, observer)
1130 1129
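# Same idea as makeloggingfileobject, but for sockets (illustrative only;
# assumes 'import socket' at the call site):
#
#   sock = makeloggingsocket(logfh, socket.create_connection(addr),
#                            b'conn', logdata=True)
#   sock.sendall(b'ping')  # logged as 'conn> sendall(4, 0)' plus the data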
1131 1130
1132 1131 def version():
1133 1132 """Return version information if available."""
1134 1133 try:
1135 1134 from . import __version__
1136 1135
1137 1136 return __version__.version
1138 1137 except ImportError:
1139 1138 return b'unknown'
1140 1139
1141 1140
1142 1141 def versiontuple(v=None, n=4):
1143 1142 """Parses a Mercurial version string into an N-tuple.
1144 1143
1145 1144 The version string to be parsed is specified with the ``v`` argument.
1146 1145 If it isn't defined, the current Mercurial version string will be parsed.
1147 1146
1148 1147 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1149 1148 returned values:
1150 1149
1151 1150 >>> v = b'3.6.1+190-df9b73d2d444'
1152 1151 >>> versiontuple(v, 2)
1153 1152 (3, 6)
1154 1153 >>> versiontuple(v, 3)
1155 1154 (3, 6, 1)
1156 1155 >>> versiontuple(v, 4)
1157 1156 (3, 6, 1, '190-df9b73d2d444')
1158 1157
1159 1158 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1160 1159 (3, 6, 1, '190-df9b73d2d444+20151118')
1161 1160
1162 1161 >>> v = b'3.6'
1163 1162 >>> versiontuple(v, 2)
1164 1163 (3, 6)
1165 1164 >>> versiontuple(v, 3)
1166 1165 (3, 6, None)
1167 1166 >>> versiontuple(v, 4)
1168 1167 (3, 6, None, None)
1169 1168
1170 1169 >>> v = b'3.9-rc'
1171 1170 >>> versiontuple(v, 2)
1172 1171 (3, 9)
1173 1172 >>> versiontuple(v, 3)
1174 1173 (3, 9, None)
1175 1174 >>> versiontuple(v, 4)
1176 1175 (3, 9, None, 'rc')
1177 1176
1178 1177 >>> v = b'3.9-rc+2-02a8fea4289b'
1179 1178 >>> versiontuple(v, 2)
1180 1179 (3, 9)
1181 1180 >>> versiontuple(v, 3)
1182 1181 (3, 9, None)
1183 1182 >>> versiontuple(v, 4)
1184 1183 (3, 9, None, 'rc+2-02a8fea4289b')
1185 1184
1186 1185 >>> versiontuple(b'4.6rc0')
1187 1186 (4, 6, None, 'rc0')
1188 1187 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1189 1188 (4, 6, None, 'rc0+12-425d55e54f98')
1190 1189 >>> versiontuple(b'.1.2.3')
1191 1190 (None, None, None, '.1.2.3')
1192 1191 >>> versiontuple(b'12.34..5')
1193 1192 (12, 34, None, '..5')
1194 1193 >>> versiontuple(b'1.2.3.4.5.6')
1195 1194 (1, 2, 3, '.4.5.6')
1196 1195 """
1197 1196 if not v:
1198 1197 v = version()
1199 1198 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1200 1199 if not m:
1201 1200 vparts, extra = b'', v
1202 1201 elif m.group(2):
1203 1202 vparts, extra = m.groups()
1204 1203 else:
1205 1204 vparts, extra = m.group(1), None
1206 1205
1207 1206 assert vparts is not None # help pytype
1208 1207
1209 1208 vints = []
1210 1209 for i in vparts.split(b'.'):
1211 1210 try:
1212 1211 vints.append(int(i))
1213 1212 except ValueError:
1214 1213 break
1215 1214 # (3, 6) -> (3, 6, None)
1216 1215 while len(vints) < 3:
1217 1216 vints.append(None)
1218 1217
1219 1218 if n == 2:
1220 1219 return (vints[0], vints[1])
1221 1220 if n == 3:
1222 1221 return (vints[0], vints[1], vints[2])
1223 1222 if n == 4:
1224 1223 return (vints[0], vints[1], vints[2], extra)
1225 1224
1226 1225 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1227 1226
1228 1227
1229 1228 def cachefunc(func):
1230 1229 '''cache the result of function calls'''
1231 1230 # XXX doesn't handle keyword args
1232 1231 if func.__code__.co_argcount == 0:
1233 1232 listcache = []
1234 1233
1235 1234 def f():
1236 1235 if len(listcache) == 0:
1237 1236 listcache.append(func())
1238 1237 return listcache[0]
1239 1238
1240 1239 return f
1241 1240 cache = {}
1242 1241 if func.__code__.co_argcount == 1:
1243 1242 # we gain a small amount of time because
1244 1243 # we don't need to pack/unpack the list
1245 1244 def f(arg):
1246 1245 if arg not in cache:
1247 1246 cache[arg] = func(arg)
1248 1247 return cache[arg]
1249 1248
1250 1249 else:
1251 1250
1252 1251 def f(*args):
1253 1252 if args not in cache:
1254 1253 cache[args] = func(*args)
1255 1254 return cache[args]
1256 1255
1257 1256 return f
1258 1257
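# cachefunc usage sketch (the decorated function and its helper are
# hypothetical):
#
#   @cachefunc
#   def parsed(text):
#       return expensiveparse(text)  # runs once per distinct 'text'
#
#   parsed(b'a')  # computed and cached
#   parsed(b'a')  # served from the cache (keyword arguments unsupported)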
1259 1258
1260 1259 class cow:
1261 1260 """helper class to make copy-on-write easier
1262 1261
1263 1262 Call preparewrite before doing any writes.
1264 1263 """
1265 1264
1266 1265 def preparewrite(self):
1267 1266 """call this before writes, return self or a copied new object"""
1268 1267 if getattr(self, '_copied', 0):
1269 1268 self._copied -= 1
1270 1269 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1271 1270 return self.__class__(self) # pytype: disable=wrong-arg-count
1272 1271 return self
1273 1272
1274 1273 def copy(self):
1275 1274 """always do a cheap copy"""
1276 1275 self._copied = getattr(self, '_copied', 0) + 1
1277 1276 return self
1278 1277
1279 1278
1280 1279 class sortdict(collections.OrderedDict):
1281 1280 """a simple sorted dictionary
1282 1281
1283 1282 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1284 1283 >>> d2 = d1.copy()
1285 1284 >>> d2
1286 1285 sortdict([('a', 0), ('b', 1)])
1287 1286 >>> d2.update([(b'a', 2)])
1288 1287 >>> list(d2.keys()) # should still be in last-set order
1289 1288 ['b', 'a']
1290 1289 >>> d1.insert(1, b'a.5', 0.5)
1291 1290 >>> d1
1292 1291 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1293 1292 """
1294 1293
1295 1294 def __setitem__(self, key, value):
1296 1295 if key in self:
1297 1296 del self[key]
1298 1297 super(sortdict, self).__setitem__(key, value)
1299 1298
1300 1299 if pycompat.ispypy:
1301 1300 # __setitem__() isn't called as of PyPy 5.8.0
1302 1301 def update(self, src, **f):
1303 1302 if isinstance(src, dict):
1304 1303 src = src.items()
1305 1304 for k, v in src:
1306 1305 self[k] = v
1307 1306 for k in f:
1308 1307 self[k] = f[k]
1309 1308
1310 1309 def insert(self, position, key, value):
1311 1310 for (i, (k, v)) in enumerate(list(self.items())):
1312 1311 if i == position:
1313 1312 self[key] = value
1314 1313 if i >= position:
1315 1314 del self[k]
1316 1315 self[k] = v
1317 1316
1318 1317
1319 1318 class cowdict(cow, dict):
1320 1319 """copy-on-write dict
1321 1320
1322 1321 Be sure to call d = d.preparewrite() before writing to d.
1323 1322
1324 1323 >>> a = cowdict()
1325 1324 >>> a is a.preparewrite()
1326 1325 True
1327 1326 >>> b = a.copy()
1328 1327 >>> b is a
1329 1328 True
1330 1329 >>> c = b.copy()
1331 1330 >>> c is a
1332 1331 True
1333 1332 >>> a = a.preparewrite()
1334 1333 >>> b is a
1335 1334 False
1336 1335 >>> a is a.preparewrite()
1337 1336 True
1338 1337 >>> c = c.preparewrite()
1339 1338 >>> b is c
1340 1339 False
1341 1340 >>> b is b.preparewrite()
1342 1341 True
1343 1342 """
1344 1343
1345 1344
1346 1345 class cowsortdict(cow, sortdict):
1347 1346 """copy-on-write sortdict
1348 1347
1349 1348 Be sure to call d = d.preparewrite() before writing to d.
1350 1349 """
1351 1350
1352 1351
1353 1352 class transactional: # pytype: disable=ignored-metaclass
1354 1353 """Base class for making a transactional type into a context manager."""
1355 1354
1356 1355 __metaclass__ = abc.ABCMeta
1357 1356
1358 1357 @abc.abstractmethod
1359 1358 def close(self):
1360 1359 """Successfully closes the transaction."""
1361 1360
1362 1361 @abc.abstractmethod
1363 1362 def release(self):
1364 1363 """Marks the end of the transaction.
1365 1364
1366 1365 If the transaction has not been closed, it will be aborted.
1367 1366 """
1368 1367
1369 1368 def __enter__(self):
1370 1369 return self
1371 1370
1372 1371 def __exit__(self, exc_type, exc_val, exc_tb):
1373 1372 try:
1374 1373 if exc_type is None:
1375 1374 self.close()
1376 1375 finally:
1377 1376 self.release()
1378 1377
1379 1378
1380 1379 @contextlib.contextmanager
1381 1380 def acceptintervention(tr=None):
1382 1381 """A context manager that closes the transaction on InterventionRequired
1383 1382
1384 1383 If no transaction was provided, this simply runs the body and returns
1385 1384 """
1386 1385 if not tr:
1387 1386 yield
1388 1387 return
1389 1388 try:
1390 1389 yield
1391 1390 tr.close()
1392 1391 except error.InterventionRequired:
1393 1392 tr.close()
1394 1393 raise
1395 1394 finally:
1396 1395 tr.release()
1397 1396
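# acceptintervention usage sketch ('tr' is any transactional object with
# close()/release(); illustrative only):
#
#   with acceptintervention(tr):
#       dowork()  # if this raises InterventionRequired, tr is still
#                 # closed before the exception propagates; otherwise tr
#                 # is closed on success and released either way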
1398 1397
1399 1398 @contextlib.contextmanager
1400 1399 def nullcontextmanager(enter_result=None):
1401 1400 yield enter_result
1402 1401
1403 1402
1404 1403 class _lrucachenode:
1405 1404 """A node in a doubly linked list.
1406 1405
1407 1406 Holds a reference to nodes on either side as well as a key-value
1408 1407 pair for the dictionary entry.
1409 1408 """
1410 1409
1411 1410 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1412 1411
1413 1412 def __init__(self):
1414 1413 self.next = self
1415 1414 self.prev = self
1416 1415
1417 1416 self.key = _notset
1418 1417 self.value = None
1419 1418 self.cost = 0
1420 1419
1421 1420 def markempty(self):
1422 1421 """Mark the node as emptied."""
1423 1422 self.key = _notset
1424 1423 self.value = None
1425 1424 self.cost = 0
1426 1425
1427 1426
1428 1427 class lrucachedict:
1429 1428 """Dict that caches most recent accesses and sets.
1430 1429
1431 1430 The dict consists of an actual backing dict - indexed by original
1432 1431 key - and a doubly linked circular list defining the order of entries in
1433 1432 the cache.
1434 1433
1435 1434 The head node is the newest entry in the cache. If the cache is full,
1436 1435 we recycle head.prev and make it the new head. Cache accesses result in
1437 1436 the node being moved to before the existing head and being marked as the
1438 1437 new head node.
1439 1438
1440 1439 Items in the cache can be inserted with an optional "cost" value. This is
1441 1440 simply an integer that is specified by the caller. The cache can be queried
1442 1441 for the total cost of all items presently in the cache.
1443 1442
1444 1443 The cache can also define a maximum cost. If a cache insertion would
1445 1444 cause the total cost of the cache to go beyond the maximum cost limit,
1446 1445 nodes will be evicted to make room for the new node. This can be used
1447 1446 to e.g. set a max memory limit and associate an estimated bytes size
1448 1447 cost to each item in the cache. By default, no maximum cost is enforced.
1449 1448 """
1450 1449
1451 1450 def __init__(self, max, maxcost=0):
1452 1451 self._cache = {}
1453 1452
1454 1453 self._head = _lrucachenode()
1455 1454 self._size = 1
1456 1455 self.capacity = max
1457 1456 self.totalcost = 0
1458 1457 self.maxcost = maxcost
1459 1458
1460 1459 def __len__(self):
1461 1460 return len(self._cache)
1462 1461
1463 1462 def __contains__(self, k):
1464 1463 return k in self._cache
1465 1464
1466 1465 def __iter__(self):
1467 1466 # We don't have to iterate in cache order, but why not.
1468 1467 n = self._head
1469 1468 for i in range(len(self._cache)):
1470 1469 yield n.key
1471 1470 n = n.next
1472 1471
1473 1472 def __getitem__(self, k):
1474 1473 node = self._cache[k]
1475 1474 self._movetohead(node)
1476 1475 return node.value
1477 1476
1478 1477 def insert(self, k, v, cost=0):
1479 1478 """Insert a new item in the cache with optional cost value."""
1480 1479 node = self._cache.get(k)
1481 1480 # Replace existing value and mark as newest.
1482 1481 if node is not None:
1483 1482 self.totalcost -= node.cost
1484 1483 node.value = v
1485 1484 node.cost = cost
1486 1485 self.totalcost += cost
1487 1486 self._movetohead(node)
1488 1487
1489 1488 if self.maxcost:
1490 1489 self._enforcecostlimit()
1491 1490
1492 1491 return
1493 1492
1494 1493 if self._size < self.capacity:
1495 1494 node = self._addcapacity()
1496 1495 else:
1497 1496 # Grab the last/oldest item.
1498 1497 node = self._head.prev
1499 1498
1500 1499 # At capacity. Kill the old entry.
1501 1500 if node.key is not _notset:
1502 1501 self.totalcost -= node.cost
1503 1502 del self._cache[node.key]
1504 1503
1505 1504 node.key = k
1506 1505 node.value = v
1507 1506 node.cost = cost
1508 1507 self.totalcost += cost
1509 1508 self._cache[k] = node
1510 1509 # And mark it as newest entry. No need to adjust order since it
1511 1510 # is already self._head.prev.
1512 1511 self._head = node
1513 1512
1514 1513 if self.maxcost:
1515 1514 self._enforcecostlimit()
1516 1515
1517 1516 def __setitem__(self, k, v):
1518 1517 self.insert(k, v)
1519 1518
1520 1519 def __delitem__(self, k):
1521 1520 self.pop(k)
1522 1521
1523 1522 def pop(self, k, default=_notset):
1524 1523 try:
1525 1524 node = self._cache.pop(k)
1526 1525 except KeyError:
1527 1526 if default is _notset:
1528 1527 raise
1529 1528 return default
1530 1529
1531 1530 assert node is not None # help pytype
1532 1531 value = node.value
1533 1532 self.totalcost -= node.cost
1534 1533 node.markempty()
1535 1534
1536 1535 # Temporarily mark as newest item before re-adjusting head to make
1537 1536 # this node the oldest item.
1538 1537 self._movetohead(node)
1539 1538 self._head = node.next
1540 1539
1541 1540 return value
1542 1541
1543 1542 # Additional dict methods.
1544 1543
1545 1544 def get(self, k, default=None):
1546 1545 try:
1547 1546 return self.__getitem__(k)
1548 1547 except KeyError:
1549 1548 return default
1550 1549
1551 1550 def peek(self, k, default=_notset):
1552 1551 """Get the specified item without moving it to the head
1553 1552
1554 1553 Unlike get(), this doesn't mutate the internal state. But be aware
1555 1554 that it doesn't mean peek() is thread safe.
1556 1555 """
1557 1556 try:
1558 1557 node = self._cache[k]
1559 1558 assert node is not None # help pytype
1560 1559 return node.value
1561 1560 except KeyError:
1562 1561 if default is _notset:
1563 1562 raise
1564 1563 return default
1565 1564
1566 1565 def clear(self):
1567 1566 n = self._head
1568 1567 while n.key is not _notset:
1569 1568 self.totalcost -= n.cost
1570 1569 n.markempty()
1571 1570 n = n.next
1572 1571
1573 1572 self._cache.clear()
1574 1573
1575 1574 def copy(self, capacity=None, maxcost=0):
1576 1575 """Create a new cache as a copy of the current one.
1577 1576
1578 1577 By default, the new cache has the same capacity as the existing one.
1579 1578 But, the cache capacity can be changed as part of performing the
1580 1579 copy.
1581 1580
1582 1581 Items in the copy have an insertion/access order matching this
1583 1582 instance.
1584 1583 """
1585 1584
1586 1585 capacity = capacity or self.capacity
1587 1586 maxcost = maxcost or self.maxcost
1588 1587 result = lrucachedict(capacity, maxcost=maxcost)
1589 1588
1590 1589 # We copy entries by iterating in oldest-to-newest order so the copy
1591 1590 # has the correct ordering.
1592 1591
1593 1592 # Find the first non-empty entry.
1594 1593 n = self._head.prev
1595 1594 while n.key is _notset and n is not self._head:
1596 1595 n = n.prev
1597 1596
1598 1597 # We could potentially skip the first N items when decreasing capacity.
1599 1598 # But let's keep it simple unless it is a performance problem.
1600 1599 for i in range(len(self._cache)):
1601 1600 result.insert(n.key, n.value, cost=n.cost)
1602 1601 n = n.prev
1603 1602
1604 1603 return result
1605 1604
1606 1605 def popoldest(self):
1607 1606 """Remove the oldest item from the cache.
1608 1607
1609 1608 Returns the (key, value) describing the removed cache entry.
1610 1609 """
1611 1610 if not self._cache:
1612 1611 return
1613 1612
1614 1613 # Walk the linked list backwards starting at tail node until we hit
1615 1614 # a non-empty node.
1616 1615 n = self._head.prev
1617 1616
1618 1617 assert n is not None # help pytype
1619 1618
1620 1619 while n.key is _notset:
1621 1620 n = n.prev
1622 1621
1623 1622 assert n is not None # help pytype
1624 1623
1625 1624 key, value = n.key, n.value
1626 1625
1627 1626 # And remove it from the cache and mark it as empty.
1628 1627 del self._cache[n.key]
1629 1628 self.totalcost -= n.cost
1630 1629 n.markempty()
1631 1630
1632 1631 return key, value
1633 1632
1634 1633 def _movetohead(self, node):
1635 1634 """Mark a node as the newest, making it the new head.
1636 1635
1637 1636 When a node is accessed, it becomes the freshest entry in the LRU
1638 1637 list, which is denoted by self._head.
1639 1638
1640 1639 Visually, let's make ``N`` the new head node (* denotes head):
1641 1640
1642 1641 previous/oldest <-> head <-> next/next newest
1643 1642
1644 1643 ----<->--- A* ---<->-----
1645 1644 | |
1646 1645 E <-> D <-> N <-> C <-> B
1647 1646
1648 1647 To:
1649 1648
1650 1649 ----<->--- N* ---<->-----
1651 1650 | |
1652 1651 E <-> D <-> C <-> B <-> A
1653 1652
1654 1653 This requires the following moves:
1655 1654
1656 1655 C.next = D (node.prev.next = node.next)
1657 1656 D.prev = C (node.next.prev = node.prev)
1658 1657 E.next = N (head.prev.next = node)
1659 1658 N.prev = E (node.prev = head.prev)
1660 1659 N.next = A (node.next = head)
1661 1660 A.prev = N (head.prev = node)
1662 1661 """
1663 1662 head = self._head
1664 1663 # C.next = D
1665 1664 node.prev.next = node.next
1666 1665 # D.prev = C
1667 1666 node.next.prev = node.prev
1668 1667 # N.prev = E
1669 1668 node.prev = head.prev
1670 1669 # N.next = A
1671 1670 # It is tempting to do just "head" here; however, if node is
1672 1671 # adjacent to head, this will do bad things.
1673 1672 node.next = head.prev.next
1674 1673 # E.next = N
1675 1674 node.next.prev = node
1676 1675 # A.prev = N
1677 1676 node.prev.next = node
1678 1677
1679 1678 self._head = node
1680 1679
1681 1680 def _addcapacity(self):
1682 1681 """Add a node to the circular linked list.
1683 1682
1684 1683 The new node is inserted before the head node.
1685 1684 """
1686 1685 head = self._head
1687 1686 node = _lrucachenode()
1688 1687 head.prev.next = node
1689 1688 node.prev = head.prev
1690 1689 node.next = head
1691 1690 head.prev = node
1692 1691 self._size += 1
1693 1692 return node
1694 1693
1695 1694 def _enforcecostlimit(self):
1696 1695 # This should run after an insertion. It should only be called if total
1697 1696 # cost limits are being enforced.
1698 1697 # The most recently inserted node is never evicted.
1699 1698 if len(self) <= 1 or self.totalcost <= self.maxcost:
1700 1699 return
1701 1700
1702 1701 # This is logically equivalent to calling popoldest() until we
1703 1702 # free up enough cost. We don't do that since popoldest() needs
1704 1703 # to walk the linked list and doing this in a loop would be
1705 1704 # quadratic. So we find the first non-empty node and then
1706 1705 # walk nodes until we free up enough capacity.
1707 1706 #
1708 1707 # If we only removed the minimum number of nodes to free enough
1709 1708 # cost at insert time, chances are high that the next insert would
1710 1709 # also require pruning. This would effectively constitute quadratic
1711 1710 # behavior for insert-heavy workloads. To mitigate this, we set a
1712 1711 # target cost that is a percentage of the max cost. This will tend
1713 1712 # to free more nodes when the high water mark is reached, which
1714 1713 # lowers the chances of needing to prune on the subsequent insert.
1715 1714 targetcost = int(self.maxcost * 0.75)
1716 1715
1717 1716 n = self._head.prev
1718 1717 while n.key is _notset:
1719 1718 n = n.prev
1720 1719
1721 1720 while len(self) > 1 and self.totalcost > targetcost:
1722 1721 del self._cache[n.key]
1723 1722 self.totalcost -= n.cost
1724 1723 n.markempty()
1725 1724 n = n.prev
1726 1725
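# lrucachedict usage sketch, including the optional cost accounting
# described in the class docstring (keys, values and costs illustrative):
#
#   d = lrucachedict(4, maxcost=1000)
#   d.insert(b'k1', b'v1', cost=600)
#   d.insert(b'k2', b'v2', cost=600)  # total cost 1200 > 1000, so the
#                                     # older entry b'k1' is evicted
#   d[b'k2']  # a hit also marks b'k2' as the newest entry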
1727 1726
1728 1727 def lrucachefunc(func):
1729 1728 '''cache most recent results of function calls'''
1730 1729 cache = {}
1731 1730 order = collections.deque()
1732 1731 if func.__code__.co_argcount == 1:
1733 1732
1734 1733 def f(arg):
1735 1734 if arg not in cache:
1736 1735 if len(cache) > 20:
1737 1736 del cache[order.popleft()]
1738 1737 cache[arg] = func(arg)
1739 1738 else:
1740 1739 order.remove(arg)
1741 1740 order.append(arg)
1742 1741 return cache[arg]
1743 1742
1744 1743 else:
1745 1744
1746 1745 def f(*args):
1747 1746 if args not in cache:
1748 1747 if len(cache) > 20:
1749 1748 del cache[order.popleft()]
1750 1749 cache[args] = func(*args)
1751 1750 else:
1752 1751 order.remove(args)
1753 1752 order.append(args)
1754 1753 return cache[args]
1755 1754
1756 1755 return f
1757 1756
1758 1757
1759 1758 class propertycache:
1760 1759 def __init__(self, func):
1761 1760 self.func = func
1762 1761 self.name = func.__name__
1763 1762
1764 1763 def __get__(self, obj, type=None):
1765 1764 result = self.func(obj)
1766 1765 self.cachevalue(obj, result)
1767 1766 return result
1768 1767
1769 1768 def cachevalue(self, obj, value):
1770 1769 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1771 1770 obj.__dict__[self.name] = value
1772 1771
1773 1772
1774 1773 def clearcachedproperty(obj, prop):
1775 1774 '''clear a cached property value, if one has been set'''
1776 1775 prop = pycompat.sysstr(prop)
1777 1776 if prop in obj.__dict__:
1778 1777 del obj.__dict__[prop]
1779 1778
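# propertycache usage sketch (class and helper names are hypothetical):
#
#   class repoinfo:
#       @propertycache
#       def branchmap(self):
#           return computebranchmap(self)  # evaluated on first access only
#
#   # the computed value is stored in the instance __dict__, shadowing the
#   # descriptor; clearcachedproperty(obj, b'branchmap') forces a recompute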
1780 1779
1781 1780 def increasingchunks(source, min=1024, max=65536):
1782 1781 """return no less than min bytes per chunk while data remains,
1783 1782 doubling min after each chunk until it reaches max"""
1784 1783
1785 1784 def log2(x):
1786 1785 if not x:
1787 1786 return 0
1788 1787 i = 0
1789 1788 while x:
1790 1789 x >>= 1
1791 1790 i += 1
1792 1791 return i - 1
1793 1792
1794 1793 buf = []
1795 1794 blen = 0
1796 1795 for chunk in source:
1797 1796 buf.append(chunk)
1798 1797 blen += len(chunk)
1799 1798 if blen >= min:
1800 1799 if min < max:
1801 1800 min = min << 1
1802 1801 nmin = 1 << log2(blen)
1803 1802 if nmin > min:
1804 1803 min = nmin
1805 1804 if min > max:
1806 1805 min = max
1807 1806 yield b''.join(buf)
1808 1807 blen = 0
1809 1808 buf = []
1810 1809 if buf:
1811 1810 yield b''.join(buf)
1812 1811
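# increasingchunks sketch: re-chunk a stream of small reads into chunks
# that start at >= 1024 bytes and grow toward 65536 bytes (illustrative):
#
#   source = iter([b'x' * 512] * 8)
#   sizes = [len(c) for c in increasingchunks(source)]
#   # the first chunk is flushed once >= 1024 bytes are buffered; later
#   # flush thresholds double until they hit the 65536-byte ceiling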
1813 1812
1814 1813 def always(fn):
1815 1814 return True
1816 1815
1817 1816
1818 1817 def never(fn):
1819 1818 return False
1820 1819
1821 1820
1822 1821 def nogc(func):
1823 1822 """disable garbage collector
1824 1823
1825 1824 Python's garbage collector triggers a GC each time a certain number of
1826 1825 container objects (the number being defined by gc.get_threshold()) are
1827 1826 allocated even when marked not to be tracked by the collector. Tracking has
1828 1827 no effect on when GCs are triggered, only on what objects the GC looks
1829 1828 into. As a workaround, disable GC while building complex (huge)
1830 1829 containers.
1831 1830
1832 1831 This garbage collector issue has been fixed in 2.7, but it still affects
1833 1832 CPython's performance.
1834 1833 """
1835 1834
1836 1835 def wrapper(*args, **kwargs):
1837 1836 gcenabled = gc.isenabled()
1838 1837 gc.disable()
1839 1838 try:
1840 1839 return func(*args, **kwargs)
1841 1840 finally:
1842 1841 if gcenabled:
1843 1842 gc.enable()
1844 1843
1845 1844 return wrapper
1846 1845
1847 1846
1848 1847 if pycompat.ispypy:
1849 1848 # PyPy runs slower with gc disabled
1850 1849 nogc = lambda x: x
1851 1850
1852 1851
1853 1852 def pathto(root, n1, n2):
1854 1853 # type: (bytes, bytes, bytes) -> bytes
1855 1854 """return the relative path from one place to another.
1856 1855 root should use os.sep to separate directories
1857 1856 n1 should use os.sep to separate directories
1858 1857 n2 should use "/" to separate directories
1859 1858 returns an os.sep-separated path.
1860 1859
1861 1860 If n1 is a relative path, it's assumed it's
1862 1861 relative to root.
1863 1862 n2 should always be relative to root.
1864 1863 """
1865 1864 if not n1:
1866 1865 return localpath(n2)
1867 1866 if os.path.isabs(n1):
1868 1867 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1869 1868 return os.path.join(root, localpath(n2))
1870 1869 n2 = b'/'.join((pconvert(root), n2))
1871 1870 a, b = splitpath(n1), n2.split(b'/')
1872 1871 a.reverse()
1873 1872 b.reverse()
1874 1873 while a and b and a[-1] == b[-1]:
1875 1874 a.pop()
1876 1875 b.pop()
1877 1876 b.reverse()
1878 1877 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1879 1878
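# Editor's note: illustrative sketch, not part of the original util.py
# (POSIX separators assumed). The shared prefix of n1 and n2 is stripped
# and replaced by the needed number of '..' components:
#
#     pathto(b'/repo', b'a/b', b'a/c/d')   # -> b'../c/d'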
1880 1879
1881 1880 def checksignature(func, depth=1):
1882 1881 '''wrap a function with code to check for calling errors'''
1883 1882
1884 1883 def check(*args, **kwargs):
1885 1884 try:
1886 1885 return func(*args, **kwargs)
1887 1886 except TypeError:
1888 1887 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1889 1888 raise error.SignatureError
1890 1889 raise
1891 1890
1892 1891 return check
1893 1892
1894 1893
1895 1894 # a whitelist of known filesystems where hardlinks work reliably
1896 1895 _hardlinkfswhitelist = {
1897 1896 b'apfs',
1898 1897 b'btrfs',
1899 1898 b'ext2',
1900 1899 b'ext3',
1901 1900 b'ext4',
1902 1901 b'hfs',
1903 1902 b'jfs',
1904 1903 b'NTFS',
1905 1904 b'reiserfs',
1906 1905 b'tmpfs',
1907 1906 b'ufs',
1908 1907 b'xfs',
1909 1908 b'zfs',
1910 1909 }
1911 1910
1912 1911
1913 1912 def copyfile(
1914 1913 src,
1915 1914 dest,
1916 1915 hardlink=False,
1917 1916 copystat=False,
1918 1917 checkambig=False,
1919 1918 nb_bytes=None,
1920 1919 no_hardlink_cb=None,
1921 1920 check_fs_hardlink=True,
1922 1921 ):
1923 1922 """copy a file, preserving mode and optionally other stat info like
1924 1923 atime/mtime
1925 1924
1926 1925 checkambig argument is used with filestat, and is useful only if
1927 1926 destination file is guarded by any lock (e.g. repo.lock or
1928 1927 repo.wlock).
1929 1928
1930 1929 copystat and checkambig should be exclusive.
1931 1930
1932 1931 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1933 1932 """
1934 1933 assert not (copystat and checkambig)
1935 1934 oldstat = None
1936 1935 if os.path.lexists(dest):
1937 1936 if checkambig:
1938 1937 oldstat = checkambig and filestat.frompath(dest)
1939 1938 unlink(dest)
1940 1939 if hardlink and check_fs_hardlink:
1941 1940 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1942 1941 # unless we are confident that dest is on a whitelisted filesystem.
1943 1942 try:
1944 1943 fstype = getfstype(os.path.dirname(dest))
1945 1944 except OSError:
1946 1945 fstype = None
1947 1946 if fstype not in _hardlinkfswhitelist:
1948 1947 if no_hardlink_cb is not None:
1949 1948 no_hardlink_cb()
1950 1949 hardlink = False
1951 1950 if hardlink:
1952 1951 try:
1953 1952 oslink(src, dest)
1954 1953 if nb_bytes is not None:
1955 1954 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1956 1955 raise error.ProgrammingError(m)
1957 1956 return
1958 1957 except (IOError, OSError) as exc:
1959 1958 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
1960 1959 no_hardlink_cb()
1961 1960 # fall back to normal copy
1962 1961 if os.path.islink(src):
1963 1962 os.symlink(os.readlink(src), dest)
1964 1963 # copytime is ignored for symlinks, but in general copytime isn't needed
1965 1964 # for them anyway
1966 1965 if nb_bytes is not None:
1967 1966 m = "cannot use `nb_bytes` on a symlink"
1968 1967 raise error.ProgrammingError(m)
1969 1968 else:
1970 1969 try:
1971 1970 shutil.copyfile(src, dest)
1972 1971 if copystat:
1973 1972 # copystat also copies mode
1974 1973 shutil.copystat(src, dest)
1975 1974 else:
1976 1975 shutil.copymode(src, dest)
1977 1976 if oldstat and oldstat.stat:
1978 1977 newstat = filestat.frompath(dest)
1979 1978 if newstat.isambig(oldstat):
1980 1979 # stat of copied file is ambiguous to original one
1981 1980 advanced = (
1982 1981 oldstat.stat[stat.ST_MTIME] + 1
1983 1982 ) & 0x7FFFFFFF
1984 1983 os.utime(dest, (advanced, advanced))
1985 1984 # We could do something smarter using `copy_file_range` call or similar
1986 1985 if nb_bytes is not None:
1987 1986 with open(dest, mode='r+') as f:
1988 1987 f.truncate(nb_bytes)
1989 1988 except shutil.Error as inst:
1990 1989 raise error.Abort(stringutil.forcebytestr(inst))
1991 1990
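# Editor's note: illustrative sketch, not part of the original util.py.
# Typical call shapes (paths hypothetical):
#
#     copyfile(b'a.txt', b'b.txt')                  # plain copy, mode kept
#     copyfile(b'a.txt', b'b.txt', hardlink=True)   # link if the fs allows
#     copyfile(b'a.txt', b'b.txt', nb_bytes=1024)   # first 1 KiB only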
1992 1991
1993 1992 def copyfiles(src, dst, hardlink=None, progress=None):
1994 1993 """Copy a directory tree using hardlinks if possible."""
1995 1994 num = 0
1996 1995
1997 1996 def settopic():
1998 1997 if progress:
1999 1998 progress.topic = _(b'linking') if hardlink else _(b'copying')
2000 1999
2001 2000 if os.path.isdir(src):
2002 2001 if hardlink is None:
2003 2002 hardlink = (
2004 2003 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
2005 2004 )
2006 2005 settopic()
2007 2006 os.mkdir(dst)
2008 2007 for name, kind in listdir(src):
2009 2008 srcname = os.path.join(src, name)
2010 2009 dstname = os.path.join(dst, name)
2011 2010 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2012 2011 num += n
2013 2012 else:
2014 2013 if hardlink is None:
2015 2014 hardlink = (
2016 2015 os.stat(os.path.dirname(src)).st_dev
2017 2016 == os.stat(os.path.dirname(dst)).st_dev
2018 2017 )
2019 2018 settopic()
2020 2019
2021 2020 if hardlink:
2022 2021 try:
2023 2022 oslink(src, dst)
2024 2023 except (IOError, OSError) as exc:
2025 2024 if exc.errno != errno.EEXIST:
2026 2025 hardlink = False
2027 2026 # XXX maybe try to relink if the file exists?
2028 2027 shutil.copy(src, dst)
2029 2028 else:
2030 2029 shutil.copy(src, dst)
2031 2030 num += 1
2032 2031 if progress:
2033 2032 progress.increment()
2034 2033
2035 2034 return hardlink, num
2036 2035
2037 2036
2038 2037 _winreservednames = {
2039 2038 b'con',
2040 2039 b'prn',
2041 2040 b'aux',
2042 2041 b'nul',
2043 2042 b'com1',
2044 2043 b'com2',
2045 2044 b'com3',
2046 2045 b'com4',
2047 2046 b'com5',
2048 2047 b'com6',
2049 2048 b'com7',
2050 2049 b'com8',
2051 2050 b'com9',
2052 2051 b'lpt1',
2053 2052 b'lpt2',
2054 2053 b'lpt3',
2055 2054 b'lpt4',
2056 2055 b'lpt5',
2057 2056 b'lpt6',
2058 2057 b'lpt7',
2059 2058 b'lpt8',
2060 2059 b'lpt9',
2061 2060 }
2062 2061 _winreservedchars = b':*?"<>|'
2063 2062
2064 2063
2065 2064 def checkwinfilename(path):
2066 2065 # type: (bytes) -> Optional[bytes]
2067 2066 r"""Check that the base-relative path is a valid filename on Windows.
2068 2067 Returns None if the path is ok, or a UI string describing the problem.
2069 2068
2070 2069 >>> checkwinfilename(b"just/a/normal/path")
2071 2070 >>> checkwinfilename(b"foo/bar/con.xml")
2072 2071 "filename contains 'con', which is reserved on Windows"
2073 2072 >>> checkwinfilename(b"foo/con.xml/bar")
2074 2073 "filename contains 'con', which is reserved on Windows"
2075 2074 >>> checkwinfilename(b"foo/bar/xml.con")
2076 2075 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2077 2076 "filename contains 'AUX', which is reserved on Windows"
2078 2077 >>> checkwinfilename(b"foo/bar/bla:.txt")
2079 2078 "filename contains ':', which is reserved on Windows"
2080 2079 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2081 2080 "filename contains '\\x07', which is invalid on Windows"
2082 2081 >>> checkwinfilename(b"foo/bar/bla ")
2083 2082 "filename ends with ' ', which is not allowed on Windows"
2084 2083 >>> checkwinfilename(b"../bar")
2085 2084 >>> checkwinfilename(b"foo\\")
2086 2085 "filename ends with '\\', which is invalid on Windows"
2087 2086 >>> checkwinfilename(b"foo\\/bar")
2088 2087 "directory name ends with '\\', which is invalid on Windows"
2089 2088 """
2090 2089 if path.endswith(b'\\'):
2091 2090 return _(b"filename ends with '\\', which is invalid on Windows")
2092 2091 if b'\\/' in path:
2093 2092 return _(b"directory name ends with '\\', which is invalid on Windows")
2094 2093 for n in path.replace(b'\\', b'/').split(b'/'):
2095 2094 if not n:
2096 2095 continue
2097 2096 for c in _filenamebytestr(n):
2098 2097 if c in _winreservedchars:
2099 2098 return (
2100 2099 _(
2101 2100 b"filename contains '%s', which is reserved "
2102 2101 b"on Windows"
2103 2102 )
2104 2103 % c
2105 2104 )
2106 2105 if ord(c) <= 31:
2107 2106 return _(
2108 2107 b"filename contains '%s', which is invalid on Windows"
2109 2108 ) % stringutil.escapestr(c)
2110 2109 base = n.split(b'.')[0]
2111 2110 if base and base.lower() in _winreservednames:
2112 2111 return (
2113 2112 _(b"filename contains '%s', which is reserved on Windows")
2114 2113 % base
2115 2114 )
2116 2115 t = n[-1:]
2117 2116 if t in b'. ' and n not in b'..':
2118 2117 return (
2119 2118 _(
2120 2119 b"filename ends with '%s', which is not allowed "
2121 2120 b"on Windows"
2122 2121 )
2123 2122 % t
2124 2123 )
2125 2124
2126 2125
2127 2126 timer = getattr(time, "perf_counter", None)
2128 2127
2129 2128 if pycompat.iswindows:
2130 2129 checkosfilename = checkwinfilename
2131 2130 if not timer:
2132 2131 timer = time.clock
2133 2132 else:
2134 2133 # mercurial.windows doesn't have platform.checkosfilename
2135 2134 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2136 2135 if not timer:
2137 2136 timer = time.time
2138 2137
2139 2138
2140 2139 def makelock(info, pathname):
2141 2140 """Create a lock file atomically if possible
2142 2141
2143 2142 This may leave a stale lock file if symlink isn't supported and signal
2144 2143 interrupt is enabled.
2145 2144 """
2146 2145 try:
2147 2146 return os.symlink(info, pathname)
2148 2147 except OSError as why:
2149 2148 if why.errno == errno.EEXIST:
2150 2149 raise
2151 2150 except AttributeError: # no symlink in os
2152 2151 pass
2153 2152
2154 2153 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2155 2154 ld = os.open(pathname, flags)
2156 2155 os.write(ld, info)
2157 2156 os.close(ld)
2158 2157
2159 2158
2160 2159 def readlock(pathname):
2161 2160 # type: (bytes) -> bytes
2162 2161 try:
2163 2162 return readlink(pathname)
2164 2163 except OSError as why:
2165 2164 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2166 2165 raise
2167 2166 except AttributeError: # no symlink in os
2168 2167 pass
2169 2168 with posixfile(pathname, b'rb') as fp:
2170 2169 return fp.read()
2171 2170
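# Editor's note: illustrative sketch, not part of the original util.py.
# The lock info round-trips through either a symlink target or a small
# file; the payload below is arbitrary, not Mercurial's lock format:
#
#     makelock(b'host:1234', b'somedir/lock')
#     readlock(b'somedir/lock')   # -> b'host:1234'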
2172 2171
2173 2172 def fstat(fp):
2174 2173 '''stat file object that may not have fileno method.'''
2175 2174 try:
2176 2175 return os.fstat(fp.fileno())
2177 2176 except AttributeError:
2178 2177 return os.stat(fp.name)
2179 2178
2180 2179
2181 2180 # File system features
2182 2181
2183 2182
2184 2183 def fscasesensitive(path):
2185 2184 # type: (bytes) -> bool
2186 2185 """
2187 2186 Return true if the given path is on a case-sensitive filesystem
2188 2187
2189 2188 Requires a path (like /foo/.hg) ending with a foldable final
2190 2189 directory component.
2191 2190 """
2192 2191 s1 = os.lstat(path)
2193 2192 d, b = os.path.split(path)
2194 2193 b2 = b.upper()
2195 2194 if b == b2:
2196 2195 b2 = b.lower()
2197 2196 if b == b2:
2198 2197 return True # no evidence against case sensitivity
2199 2198 p2 = os.path.join(d, b2)
2200 2199 try:
2201 2200 s2 = os.lstat(p2)
2202 2201 if s2 == s1:
2203 2202 return False
2204 2203 return True
2205 2204 except OSError:
2206 2205 return True
2207 2206
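# Editor's note: illustrative sketch, not part of the original util.py.
# The probe lstat()s a case-swapped sibling of the final path component:
#
#     fscasesensitive(b'/repo/.hg')
#     # True on typical Linux ext4; usually False on default APFS/NTFS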
2208 2207
2209 2208 _re2_input = lambda x: x
2210 2209 try:
2211 2210 import re2 # pytype: disable=import-error
2212 2211
2213 2212 _re2 = None
2214 2213 except ImportError:
2215 2214 _re2 = False
2216 2215
2217 2216
2218 2217 class _re:
2219 2218 def _checkre2(self):
2220 2219 global _re2
2221 2220 global _re2_input
2222 2221
2223 2222 check_pattern = br'\[([^\[]+)\]'
2224 2223 check_input = b'[ui]'
2225 2224 try:
2226 2225 # check if match works, see issue3964
2227 2226 _re2 = bool(re2.match(check_pattern, check_input))
2228 2227 except ImportError:
2229 2228 _re2 = False
2230 2229 except TypeError:
2231 2230 # the `pyre-2` project provides a re2 module that accepts bytes
2232 2231 # the `fb-re2` project provides a re2 module that accepts sysstr
2233 2232 check_pattern = pycompat.sysstr(check_pattern)
2234 2233 check_input = pycompat.sysstr(check_input)
2235 2234 _re2 = bool(re2.match(check_pattern, check_input))
2236 2235 _re2_input = pycompat.sysstr
2237 2236
2238 2237 def compile(self, pat, flags=0):
2239 2238 """Compile a regular expression, using re2 if possible
2240 2239
2241 2240 For best performance, use only re2-compatible regexp features. The
2242 2241 only flags from the re module that are re2-compatible are
2243 2242 IGNORECASE and MULTILINE."""
2244 2243 if _re2 is None:
2245 2244 self._checkre2()
2246 2245 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2247 2246 if flags & remod.IGNORECASE:
2248 2247 pat = b'(?i)' + pat
2249 2248 if flags & remod.MULTILINE:
2250 2249 pat = b'(?m)' + pat
2251 2250 try:
2252 2251 return re2.compile(_re2_input(pat))
2253 2252 except re2.error:
2254 2253 pass
2255 2254 return remod.compile(pat, flags)
2256 2255
2257 2256 @propertycache
2258 2257 def escape(self):
2259 2258 """Return the version of escape corresponding to self.compile.
2260 2259
2261 2260 This is imperfect because whether re2 or re is used for a particular
2262 2261 function depends on the flags, etc, but it's the best we can do.
2263 2262 """
2264 2263 global _re2
2265 2264 if _re2 is None:
2266 2265 self._checkre2()
2267 2266 if _re2:
2268 2267 return re2.escape
2269 2268 else:
2270 2269 return remod.escape
2271 2270
2272 2271
2273 2272 re = _re()
2274 2273
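# Editor's note: illustrative sketch, not part of the original util.py.
# Callers use the module-level `re` instance like the stdlib module; the
# re2 fast path is transparent and only applies to compatible flags:
#
#     pat = re.compile(br'^[0-9a-f]{40}$', remod.IGNORECASE)
#     bool(pat.match(b'A' * 40))   # True with either engine
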
2275 2274 _fspathcache = {}
2276 2275
2277 2276
2278 2277 def fspath(name, root):
2279 2278 # type: (bytes, bytes) -> bytes
2280 2279 """Get name in the case stored in the filesystem
2281 2280
2282 2281 The name should be relative to root, and be normcase-ed for efficiency.
2283 2282
2284 2283 Note that this function is unnecessary, and should not be
2285 2284 called, for case-sensitive filesystems (simply because it's expensive).
2286 2285
2287 2286 The root should be normcase-ed, too.
2288 2287 """
2289 2288
2290 2289 def _makefspathcacheentry(dir):
2291 2290 return {normcase(n): n for n in os.listdir(dir)}
2292 2291
2293 2292 seps = pycompat.ossep
2294 2293 if pycompat.osaltsep:
2295 2294 seps = seps + pycompat.osaltsep
2296 2295 # Protect backslashes. This gets silly very quickly.
2297 2296 seps = seps.replace(b'\\', b'\\\\')
2298 2297 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2299 2298 dir = os.path.normpath(root)
2300 2299 result = []
2301 2300 for part, sep in pattern.findall(name):
2302 2301 if sep:
2303 2302 result.append(sep)
2304 2303 continue
2305 2304
2306 2305 if dir not in _fspathcache:
2307 2306 _fspathcache[dir] = _makefspathcacheentry(dir)
2308 2307 contents = _fspathcache[dir]
2309 2308
2310 2309 found = contents.get(part)
2311 2310 if not found:
2312 2311 # retry "once per directory" per "dirstate.walk" which
2313 2312 # may take place for each patch of "hg qpush", for example
2314 2313 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2315 2314 found = contents.get(part)
2316 2315
2317 2316 result.append(found or part)
2318 2317 dir = os.path.join(dir, part)
2319 2318
2320 2319 return b''.join(result)
2321 2320
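# Editor's note: illustrative sketch, not part of the original util.py.
# On a case-insensitive filesystem holding "Foo/Bar.txt" (hypothetical
# tree), fspath() recovers the on-disk spelling of a normcased name:
#
#     fspath(b'foo/bar.txt', b'/repo')   # -> b'Foo/Bar.txt'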
2322 2321
2323 2322 def checknlink(testfile):
2324 2323 # type: (bytes) -> bool
2325 2324 '''check whether hardlink count reporting works properly'''
2326 2325
2327 2326 # testfile may be open, so we need a separate file for checking to
2328 2327 # work around issue2543 (or testfile may get lost on Samba shares)
2329 2328 f1, f2, fp = None, None, None
2330 2329 try:
2331 2330 fd, f1 = pycompat.mkstemp(
2332 2331 prefix=b'.%s-' % os.path.basename(testfile),
2333 2332 suffix=b'1~',
2334 2333 dir=os.path.dirname(testfile),
2335 2334 )
2336 2335 os.close(fd)
2337 2336 f2 = b'%s2~' % f1[:-2]
2338 2337
2339 2338 oslink(f1, f2)
2340 2339 # nlinks() may behave differently for files on Windows shares if
2341 2340 # the file is open.
2342 2341 fp = posixfile(f2)
2343 2342 return nlinks(f2) > 1
2344 2343 except OSError:
2345 2344 return False
2346 2345 finally:
2347 2346 if fp is not None:
2348 2347 fp.close()
2349 2348 for f in (f1, f2):
2350 2349 try:
2351 2350 if f is not None:
2352 2351 os.unlink(f)
2353 2352 except OSError:
2354 2353 pass
2355 2354
2356 2355
2357 2356 def endswithsep(path):
2358 2357 # type: (bytes) -> bool
2359 2358 '''Check path ends with os.sep or os.altsep.'''
2360 2359 return bool( # help pytype
2361 2360 path.endswith(pycompat.ossep)
2362 2361 or pycompat.osaltsep
2363 2362 and path.endswith(pycompat.osaltsep)
2364 2363 )
2365 2364
2366 2365
2367 2366 def splitpath(path):
2368 2367 # type: (bytes) -> List[bytes]
2369 2368 """Split path by os.sep.
2370 2369 Note that this function does not use os.altsep because this is
2371 2370 an alternative to a simple "xxx.split(os.sep)".
2372 2371 It is recommended to use os.path.normpath() before using this
2373 2372 function if needed."""
2374 2373 return path.split(pycompat.ossep)
2375 2374
2376 2375
2377 2376 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2378 2377 """Create a temporary file with the same contents from name
2379 2378
2380 2379 The permission bits are copied from the original file.
2381 2380
2382 2381 If the temporary file is going to be truncated immediately, you
2383 2382 can use emptyok=True as an optimization.
2384 2383
2385 2384 Returns the name of the temporary file.
2386 2385 """
2387 2386 d, fn = os.path.split(name)
2388 2387 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2389 2388 os.close(fd)
2390 2389 # Temporary files are created with mode 0600, which is usually not
2391 2390 # what we want. If the original file already exists, just copy
2392 2391 # its mode. Otherwise, manually obey umask.
2393 2392 copymode(name, temp, createmode, enforcewritable)
2394 2393
2395 2394 if emptyok:
2396 2395 return temp
2397 2396 try:
2398 2397 try:
2399 2398 ifp = posixfile(name, b"rb")
2400 2399 except IOError as inst:
2401 2400 if inst.errno == errno.ENOENT:
2402 2401 return temp
2403 2402 if not getattr(inst, 'filename', None):
2404 2403 inst.filename = name
2405 2404 raise
2406 2405 ofp = posixfile(temp, b"wb")
2407 2406 for chunk in filechunkiter(ifp):
2408 2407 ofp.write(chunk)
2409 2408 ifp.close()
2410 2409 ofp.close()
2411 2410 except: # re-raises
2412 2411 try:
2413 2412 os.unlink(temp)
2414 2413 except OSError:
2415 2414 pass
2416 2415 raise
2417 2416 return temp
2418 2417
2419 2418
2420 2419 class filestat:
2421 2420 """help to exactly detect change of a file
2422 2421
2423 2422 'stat' attribute is result of 'os.stat()' if specified 'path'
2424 2423 exists. Otherwise, it is None. This can avoid preparative
2425 2424 'exists()' examination on client side of this class.
2426 2425 """
2427 2426
2428 2427 def __init__(self, stat):
2429 2428 self.stat = stat
2430 2429
2431 2430 @classmethod
2432 2431 def frompath(cls, path):
2433 2432 try:
2434 2433 stat = os.stat(path)
2435 2434 except OSError as err:
2436 2435 if err.errno != errno.ENOENT:
2437 2436 raise
2438 2437 stat = None
2439 2438 return cls(stat)
2440 2439
2441 2440 @classmethod
2442 2441 def fromfp(cls, fp):
2443 2442 stat = os.fstat(fp.fileno())
2444 2443 return cls(stat)
2445 2444
2446 2445 __hash__ = object.__hash__
2447 2446
2448 2447 def __eq__(self, old):
2449 2448 try:
2450 2449 # if ambiguity between stat of new and old file is
2451 2450 # avoided, comparison of size, ctime and mtime is enough
2452 2451 # to exactly detect change of a file regardless of platform
2453 2452 return (
2454 2453 self.stat.st_size == old.stat.st_size
2455 2454 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2456 2455 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2457 2456 )
2458 2457 except AttributeError:
2459 2458 pass
2460 2459 try:
2461 2460 return self.stat is None and old.stat is None
2462 2461 except AttributeError:
2463 2462 return False
2464 2463
2465 2464 def isambig(self, old):
2466 2465 """Examine whether new (= self) stat is ambiguous against old one
2467 2466
2468 2467 "S[N]" below means stat of a file at N-th change:
2469 2468
2470 2469 - S[n-1].ctime < S[n].ctime: can detect change of a file
2471 2470 - S[n-1].ctime == S[n].ctime
2472 2471 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2473 2472 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2474 2473 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2475 2474 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2476 2475
2477 2476 Case (*2) above means that a file was changed twice or more within
2478 2477 the same second (= S[n-1].ctime), so comparison of the timestamps
2479 2478 is ambiguous.
2480 2479
2481 2480 The basic idea for avoiding such ambiguity is "advance mtime 1 sec,
2482 2481 if the timestamp is ambiguous".
2483 2482
2484 2483 But advancing mtime only in case (*2) doesn't work as
2485 2484 expected, because naturally advanced S[n].mtime in case (*1)
2486 2485 might be equal to manually advanced S[n-1 or earlier].mtime.
2487 2486
2488 2487 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2489 2488 treated as ambiguous regardless of mtime, to avoid a change being
2490 2489 overlooked because of a collision between such mtimes.
2491 2490
2492 2491 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2493 2492 S[n].mtime", even if size of a file isn't changed.
2494 2493 """
2495 2494 try:
2496 2495 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2497 2496 except AttributeError:
2498 2497 return False
2499 2498
2500 2499 def avoidambig(self, path, old):
2501 2500 """Change file stat of specified path to avoid ambiguity
2502 2501
2503 2502 'old' should be previous filestat of 'path'.
2504 2503
2505 2504 This skips avoiding ambiguity, if a process doesn't have
2506 2505 appropriate privileges for 'path'. This returns False in this
2507 2506 case.
2508 2507
2509 2508 Otherwise, this returns True, as "ambiguity is avoided".
2510 2509 """
2511 2510 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2512 2511 try:
2513 2512 os.utime(path, (advanced, advanced))
2514 2513 except OSError as inst:
2515 2514 if inst.errno == errno.EPERM:
2516 2515 # utime() on the file created by another user causes EPERM,
2517 2516 # if a process doesn't have appropriate privileges
2518 2517 return False
2519 2518 raise
2520 2519 return True
2521 2520
2522 2521 def __ne__(self, other):
2523 2522 return not self == other
2524 2523
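# Editor's note: illustrative sketch, not part of the original util.py.
# The typical ambiguity dance performed by callers holding a lock:
#
#     old = filestat.frompath(path)
#     # ... rewrite the file at `path` (hypothetical mutation) ...
#     new = filestat.frompath(path)
#     if new.isambig(old):
#         new.avoidambig(path, old)   # advance mtime 1s (wraps at 2**31)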
2525 2524
2526 2525 class atomictempfile:
2527 2526 """writable file object that atomically updates a file
2528 2527
2529 2528 All writes will go to a temporary copy of the original file. Call
2530 2529 close() when you are done writing, and atomictempfile will rename
2531 2530 the temporary copy to the original name, making the changes
2532 2531 visible. If the object is destroyed without being closed, all your
2533 2532 writes are discarded.
2534 2533
2535 2534 checkambig argument of constructor is used with filestat, and is
2536 2535 useful only if target file is guarded by any lock (e.g. repo.lock
2537 2536 or repo.wlock).
2538 2537 """
2539 2538
2540 2539 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2541 2540 self.__name = name # permanent name
2542 2541 self._tempname = mktempcopy(
2543 2542 name,
2544 2543 emptyok=(b'w' in mode),
2545 2544 createmode=createmode,
2546 2545 enforcewritable=(b'w' in mode),
2547 2546 )
2548 2547
2549 2548 self._fp = posixfile(self._tempname, mode)
2550 2549 self._checkambig = checkambig
2551 2550
2552 2551 # delegated methods
2553 2552 self.read = self._fp.read
2554 2553 self.write = self._fp.write
2555 2554 self.seek = self._fp.seek
2556 2555 self.tell = self._fp.tell
2557 2556 self.fileno = self._fp.fileno
2558 2557
2559 2558 def close(self):
2560 2559 if not self._fp.closed:
2561 2560 self._fp.close()
2562 2561 filename = localpath(self.__name)
2563 2562 oldstat = self._checkambig and filestat.frompath(filename)
2564 2563 if oldstat and oldstat.stat:
2565 2564 rename(self._tempname, filename)
2566 2565 newstat = filestat.frompath(filename)
2567 2566 if newstat.isambig(oldstat):
2568 2567 # stat of changed file is ambiguous to original one
2569 2568 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2570 2569 os.utime(filename, (advanced, advanced))
2571 2570 else:
2572 2571 rename(self._tempname, filename)
2573 2572
2574 2573 def discard(self):
2575 2574 if not self._fp.closed:
2576 2575 try:
2577 2576 os.unlink(self._tempname)
2578 2577 except OSError:
2579 2578 pass
2580 2579 self._fp.close()
2581 2580
2582 2581 def __del__(self):
2583 2582 if safehasattr(self, '_fp'): # constructor actually did something
2584 2583 self.discard()
2585 2584
2586 2585 def __enter__(self):
2587 2586 return self
2588 2587
2589 2588 def __exit__(self, exctype, excvalue, traceback):
2590 2589 if exctype is not None:
2591 2590 self.discard()
2592 2591 else:
2593 2592 self.close()
2594 2593
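# Editor's note: illustrative sketch, not part of the original util.py.
# As a context manager, the rename happens only on a clean exit:
#
#     with atomictempfile(b'data.bin', checkambig=True) as fp:
#         fp.write(b'new contents')
#     # on an exception the temp copy is discarded; data.bin is untouched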
2595 2594
2596 2595 def unlinkpath(f, ignoremissing=False, rmdir=True):
2597 2596 # type: (bytes, bool, bool) -> None
2598 2597 """unlink and remove the directory if it is empty"""
2599 2598 if ignoremissing:
2600 2599 tryunlink(f)
2601 2600 else:
2602 2601 unlink(f)
2603 2602 if rmdir:
2604 2603 # try removing directories that might now be empty
2605 2604 try:
2606 2605 removedirs(os.path.dirname(f))
2607 2606 except OSError:
2608 2607 pass
2609 2608
2610 2609
2611 2610 def tryunlink(f):
2612 2611 # type: (bytes) -> None
2613 2612 """Attempt to remove a file, ignoring ENOENT errors."""
2614 2613 try:
2615 2614 unlink(f)
2616 2615 except OSError as e:
2617 2616 if e.errno != errno.ENOENT:
2618 2617 raise
2619 2618
2620 2619
2621 2620 def makedirs(name, mode=None, notindexed=False):
2622 2621 # type: (bytes, Optional[int], bool) -> None
2623 2622 """recursive directory creation with parent mode inheritance
2624 2623
2625 2624 Newly created directories are marked as "not to be indexed by
2626 2625 the content indexing service", if ``notindexed`` is specified
2627 2626 for "write" mode access.
2628 2627 """
2629 2628 try:
2630 2629 makedir(name, notindexed)
2631 2630 except OSError as err:
2632 2631 if err.errno == errno.EEXIST:
2633 2632 return
2634 2633 if err.errno != errno.ENOENT or not name:
2635 2634 raise
2636 2635 parent = os.path.dirname(abspath(name))
2637 2636 if parent == name:
2638 2637 raise
2639 2638 makedirs(parent, mode, notindexed)
2640 2639 try:
2641 2640 makedir(name, notindexed)
2642 2641 except OSError as err:
2643 2642 # Catch EEXIST to handle races
2644 2643 if err.errno == errno.EEXIST:
2645 2644 return
2646 2645 raise
2647 2646 if mode is not None:
2648 2647 os.chmod(name, mode)
2649 2648
2650 2649
2651 2650 def readfile(path):
2652 2651 # type: (bytes) -> bytes
2653 2652 with open(path, b'rb') as fp:
2654 2653 return fp.read()
2655 2654
2656 2655
2657 2656 def writefile(path, text):
2658 2657 # type: (bytes, bytes) -> None
2659 2658 with open(path, b'wb') as fp:
2660 2659 fp.write(text)
2661 2660
2662 2661
2663 2662 def appendfile(path, text):
2664 2663 # type: (bytes, bytes) -> None
2665 2664 with open(path, b'ab') as fp:
2666 2665 fp.write(text)
2667 2666
2668 2667
2669 2668 class chunkbuffer:
2670 2669 """Allow arbitrary sized chunks of data to be efficiently read from an
2671 2670 iterator over chunks of arbitrary size."""
2672 2671
2673 2672 def __init__(self, in_iter):
2674 2673 """in_iter is the iterator that's iterating over the input chunks."""
2675 2674
2676 2675 def splitbig(chunks):
2677 2676 for chunk in chunks:
2678 2677 if len(chunk) > 2 ** 20:
2679 2678 pos = 0
2680 2679 while pos < len(chunk):
2681 2680 end = pos + 2 ** 18
2682 2681 yield chunk[pos:end]
2683 2682 pos = end
2684 2683 else:
2685 2684 yield chunk
2686 2685
2687 2686 self.iter = splitbig(in_iter)
2688 2687 self._queue = collections.deque()
2689 2688 self._chunkoffset = 0
2690 2689
2691 2690 def read(self, l=None):
2692 2691 """Read L bytes of data from the iterator of chunks of data.
2693 2692 Returns less than L bytes if the iterator runs dry.
2694 2693
2695 2694 If size parameter is omitted, read everything"""
2696 2695 if l is None:
2697 2696 return b''.join(self.iter)
2698 2697
2699 2698 left = l
2700 2699 buf = []
2701 2700 queue = self._queue
2702 2701 while left > 0:
2703 2702 # refill the queue
2704 2703 if not queue:
2705 2704 target = 2 ** 18
2706 2705 for chunk in self.iter:
2707 2706 queue.append(chunk)
2708 2707 target -= len(chunk)
2709 2708 if target <= 0:
2710 2709 break
2711 2710 if not queue:
2712 2711 break
2713 2712
2714 2713 # The easy way to do this would be to queue.popleft(), modify the
2715 2714 # chunk (if necessary), then queue.appendleft(). However, for cases
2716 2715 # where we read partial chunk content, this incurs 2 dequeue
2717 2716 # mutations and creates a new str for the remaining chunk in the
2718 2717 # queue. Our code below avoids this overhead.
2719 2718
2720 2719 chunk = queue[0]
2721 2720 chunkl = len(chunk)
2722 2721 offset = self._chunkoffset
2723 2722
2724 2723 # Use full chunk.
2725 2724 if offset == 0 and left >= chunkl:
2726 2725 left -= chunkl
2727 2726 queue.popleft()
2728 2727 buf.append(chunk)
2729 2728 # self._chunkoffset remains at 0.
2730 2729 continue
2731 2730
2732 2731 chunkremaining = chunkl - offset
2733 2732
2734 2733 # Use all of unconsumed part of chunk.
2735 2734 if left >= chunkremaining:
2736 2735 left -= chunkremaining
2737 2736 queue.popleft()
2738 2737 # offset == 0 is enabled by block above, so this won't merely
2739 2738 # copy via ``chunk[0:]``.
2740 2739 buf.append(chunk[offset:])
2741 2740 self._chunkoffset = 0
2742 2741
2743 2742 # Partial chunk needed.
2744 2743 else:
2745 2744 buf.append(chunk[offset : offset + left])
2746 2745 self._chunkoffset += left
2747 2746 left -= chunkremaining
2748 2747
2749 2748 return b''.join(buf)
2750 2749
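# Editor's note: illustrative sketch, not part of the original util.py.
# chunkbuffer re-blocks an iterator of arbitrarily sized chunks into
# exact-size reads; the final read is short once the iterator runs dry:
#
#     cb = chunkbuffer(iter([b'ab', b'cdef', b'g']))
#     cb.read(3)    # -> b'abc'
#     cb.read(10)   # -> b'defg'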
2751 2750
2752 2751 def filechunkiter(f, size=131072, limit=None):
2753 2752 """Create a generator that produces the data in the file size
2754 2753 (default 131072) bytes at a time, up to optional limit (default is
2755 2754 to read all data). Chunks may be less than size bytes if the
2756 2755 chunk is the last chunk in the file, or the file is a socket or
2757 2756 some other type of file that sometimes reads less data than is
2758 2757 requested."""
2759 2758 assert size >= 0
2760 2759 assert limit is None or limit >= 0
2761 2760 while True:
2762 2761 if limit is None:
2763 2762 nbytes = size
2764 2763 else:
2765 2764 nbytes = min(limit, size)
2766 2765 s = nbytes and f.read(nbytes)
2767 2766 if not s:
2768 2767 break
2769 2768 if limit:
2770 2769 limit -= len(s)
2771 2770 yield s
2772 2771
2773 2772
2774 2773 class cappedreader:
2775 2774 """A file object proxy that allows reading up to N bytes.
2776 2775
2777 2776 Given a source file object, instances of this type allow reading up to
2778 2777 N bytes from that source file object. Attempts to read past the allowed
2779 2778 limit are treated as EOF.
2780 2779
2781 2780 It is assumed that I/O is not performed on the original file object
2782 2781 in addition to I/O that is performed by this instance. If there is,
2783 2782 state tracking will get out of sync and unexpected results will ensue.
2784 2783 """
2785 2784
2786 2785 def __init__(self, fh, limit):
2787 2786 """Allow reading up to <limit> bytes from <fh>."""
2788 2787 self._fh = fh
2789 2788 self._left = limit
2790 2789
2791 2790 def read(self, n=-1):
2792 2791 if not self._left:
2793 2792 return b''
2794 2793
2795 2794 if n < 0:
2796 2795 n = self._left
2797 2796
2798 2797 data = self._fh.read(min(n, self._left))
2799 2798 self._left -= len(data)
2800 2799 assert self._left >= 0
2801 2800
2802 2801 return data
2803 2802
2804 2803 def readinto(self, b):
2805 2804 res = self.read(len(b))
2806 2805 if res is None:
2807 2806 return None
2808 2807
2809 2808 b[0 : len(res)] = res
2810 2809 return len(res)
2811 2810
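# Editor's note: illustrative sketch, not part of the original util.py.
# cappedreader presents the first N bytes of a file object as a stream
# that hits EOF at the limit:
#
#     capped = cappedreader(io.BytesIO(b'0123456789'), 4)
#     capped.read()    # -> b'0123'
#     capped.read(1)   # -> b'' (limit reached)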
2812 2811
2813 2812 def unitcountfn(*unittable):
2814 2813 '''return a function that renders a readable count of some quantity'''
2815 2814
2816 2815 def go(count):
2817 2816 for multiplier, divisor, format in unittable:
2818 2817 if abs(count) >= divisor * multiplier:
2819 2818 return format % (count / float(divisor))
2820 2819 return unittable[-1][2] % count
2821 2820
2822 2821 return go
2823 2822
2824 2823
2825 2824 def processlinerange(fromline, toline):
2826 2825 # type: (int, int) -> Tuple[int, int]
2827 2826 """Check that linerange <fromline>:<toline> makes sense and return a
2828 2827 0-based range.
2829 2828
2830 2829 >>> processlinerange(10, 20)
2831 2830 (9, 20)
2832 2831 >>> processlinerange(2, 1)
2833 2832 Traceback (most recent call last):
2834 2833 ...
2835 2834 ParseError: line range must be positive
2836 2835 >>> processlinerange(0, 5)
2837 2836 Traceback (most recent call last):
2838 2837 ...
2839 2838 ParseError: fromline must be strictly positive
2840 2839 """
2841 2840 if toline - fromline < 0:
2842 2841 raise error.ParseError(_(b"line range must be positive"))
2843 2842 if fromline < 1:
2844 2843 raise error.ParseError(_(b"fromline must be strictly positive"))
2845 2844 return fromline - 1, toline
2846 2845
2847 2846
2848 2847 bytecount = unitcountfn(
2849 2848 (100, 1 << 30, _(b'%.0f GB')),
2850 2849 (10, 1 << 30, _(b'%.1f GB')),
2851 2850 (1, 1 << 30, _(b'%.2f GB')),
2852 2851 (100, 1 << 20, _(b'%.0f MB')),
2853 2852 (10, 1 << 20, _(b'%.1f MB')),
2854 2853 (1, 1 << 20, _(b'%.2f MB')),
2855 2854 (100, 1 << 10, _(b'%.0f KB')),
2856 2855 (10, 1 << 10, _(b'%.1f KB')),
2857 2856 (1, 1 << 10, _(b'%.2f KB')),
2858 2857 (1, 1, _(b'%.0f bytes')),
2859 2858 )
2860 2859
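# Editor's note: illustrative sketch, not part of the original util.py.
# Precision tightens as the value shrinks relative to its unit (outputs
# assume untranslated message catalogs):
#
#     bytecount(105 * (1 << 30))   # -> b'105 GB'
#     bytecount(3 * (1 << 20))     # -> b'3.00 MB'
#     bytecount(42)                # -> b'42 bytes'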
2861 2860
2862 2861 class transformingwriter:
2863 2862 """Writable file wrapper to transform data by function"""
2864 2863
2865 2864 def __init__(self, fp, encode):
2866 2865 self._fp = fp
2867 2866 self._encode = encode
2868 2867
2869 2868 def close(self):
2870 2869 self._fp.close()
2871 2870
2872 2871 def flush(self):
2873 2872 self._fp.flush()
2874 2873
2875 2874 def write(self, data):
2876 2875 return self._fp.write(self._encode(data))
2877 2876
2878 2877
2879 2878 # Matches a single EOL, which can be either a CRLF (where repeated CRs
2880 2879 # are removed) or a LF. We do not care about old Macintosh files, so a
2881 2880 # stray CR is an error.
2882 2881 _eolre = remod.compile(br'\r*\n')
2883 2882
2884 2883
2885 2884 def tolf(s):
2886 2885 # type: (bytes) -> bytes
2887 2886 return _eolre.sub(b'\n', s)
2888 2887
2889 2888
2890 2889 def tocrlf(s):
2891 2890 # type: (bytes) -> bytes
2892 2891 return _eolre.sub(b'\r\n', s)
2893 2892
2894 2893
2895 2894 def _crlfwriter(fp):
2896 2895 return transformingwriter(fp, tocrlf)
2897 2896
2898 2897
2899 2898 if pycompat.oslinesep == b'\r\n':
2900 2899 tonativeeol = tocrlf
2901 2900 fromnativeeol = tolf
2902 2901 nativeeolwriter = _crlfwriter
2903 2902 else:
2904 2903 tonativeeol = pycompat.identity
2905 2904 fromnativeeol = pycompat.identity
2906 2905 nativeeolwriter = pycompat.identity
2907 2906
2908 2907
2909 2908 # TODO delete since the workaround variant for Python 2 is no longer needed.
2910 2909 def iterfile(fp):
2911 2910 return fp
2912 2911
2913 2912
2914 2913 def iterlines(iterator):
2915 2914 # type: (Iterator[bytes]) -> Iterator[bytes]
2916 2915 for chunk in iterator:
2917 2916 for line in chunk.splitlines():
2918 2917 yield line
2919 2918
2920 2919
2921 2920 def expandpath(path):
2922 2921 # type: (bytes) -> bytes
2923 2922 return os.path.expanduser(os.path.expandvars(path))
2924 2923
2925 2924
2926 2925 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2927 2926 """Return the result of interpolating items in the mapping into string s.
2928 2927
2929 2928 prefix is a single character string, or a two character string with
2930 2929 a backslash as the first character if the prefix needs to be escaped in
2931 2930 a regular expression.
2932 2931
2933 2932 fn is an optional function that will be applied to the replacement text
2934 2933 just before replacement.
2935 2934
2936 2935 escape_prefix is an optional flag that allows using doubled prefix for
2937 2936 its escaping.
2938 2937 """
2939 2938 fn = fn or (lambda s: s)
2940 2939 patterns = b'|'.join(mapping.keys())
2941 2940 if escape_prefix:
2942 2941 patterns += b'|' + prefix
2943 2942 if len(prefix) > 1:
2944 2943 prefix_char = prefix[1:]
2945 2944 else:
2946 2945 prefix_char = prefix
2947 2946 mapping[prefix_char] = prefix_char
2948 2947 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2949 2948 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2950 2949
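# Editor's note: illustrative sketch, not part of the original util.py.
# Expanding prefix-style placeholders from a mapping:
#
#     interpolate(b'%', {b'user': b'alice'}, b'hi %user')   # -> b'hi alice'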
2951 2950
2952 2951 timecount = unitcountfn(
2953 2952 (1, 1e3, _(b'%.0f s')),
2954 2953 (100, 1, _(b'%.1f s')),
2955 2954 (10, 1, _(b'%.2f s')),
2956 2955 (1, 1, _(b'%.3f s')),
2957 2956 (100, 0.001, _(b'%.1f ms')),
2958 2957 (10, 0.001, _(b'%.2f ms')),
2959 2958 (1, 0.001, _(b'%.3f ms')),
2960 2959 (100, 0.000001, _(b'%.1f us')),
2961 2960 (10, 0.000001, _(b'%.2f us')),
2962 2961 (1, 0.000001, _(b'%.3f us')),
2963 2962 (100, 0.000000001, _(b'%.1f ns')),
2964 2963 (10, 0.000000001, _(b'%.2f ns')),
2965 2964 (1, 0.000000001, _(b'%.3f ns')),
2966 2965 )
2967 2966
2968 2967
2969 2968 @attr.s
2970 2969 class timedcmstats:
2971 2970 """Stats information produced by the timedcm context manager on entering."""
2972 2971
2973 2972 # the starting value of the timer as a float (meaning and resolution are
2974 2973 # platform dependent, see util.timer)
2975 2974 start = attr.ib(default=attr.Factory(lambda: timer()))
2976 2975 # the number of seconds as a floating point value; starts at 0, updated when
2977 2976 # the context is exited.
2978 2977 elapsed = attr.ib(default=0)
2979 2978 # the number of nested timedcm context managers.
2980 2979 level = attr.ib(default=1)
2981 2980
2982 2981 def __bytes__(self):
2983 2982 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
2984 2983
2985 2984 __str__ = encoding.strmethod(__bytes__)
2986 2985
2987 2986
2988 2987 @contextlib.contextmanager
2989 2988 def timedcm(whencefmt, *whenceargs):
2990 2989 """A context manager that produces timing information for a given context.
2991 2990
2992 2991 On entering a timedcmstats instance is produced.
2993 2992
2994 2993 This context manager is reentrant.
2995 2994
2996 2995 """
2997 2996 # track nested context managers
2998 2997 timedcm._nested += 1
2999 2998 timing_stats = timedcmstats(level=timedcm._nested)
3000 2999 try:
3001 3000 with tracing.log(whencefmt, *whenceargs):
3002 3001 yield timing_stats
3003 3002 finally:
3004 3003 timing_stats.elapsed = timer() - timing_stats.start
3005 3004 timedcm._nested -= 1
3006 3005
3007 3006
3008 3007 timedcm._nested = 0
3009 3008
3010 3009
3011 3010 def timed(func):
3012 3011 """Report the execution time of a function call to stderr.
3013 3012
3014 3013 During development, use as a decorator when you need to measure
3015 3014 the cost of a function, e.g. as follows:
3016 3015
3017 3016 @util.timed
3018 3017 def foo(a, b, c):
3019 3018 pass
3020 3019 """
3021 3020
3022 3021 def wrapper(*args, **kwargs):
3023 3022 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3024 3023 result = func(*args, **kwargs)
3025 3024 stderr = procutil.stderr
3026 3025 stderr.write(
3027 3026 b'%s%s: %s\n'
3028 3027 % (
3029 3028 b' ' * time_stats.level * 2,
3030 3029 pycompat.bytestr(func.__name__),
3031 3030 time_stats,
3032 3031 )
3033 3032 )
3034 3033 return result
3035 3034
3036 3035 return wrapper
3037 3036
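# Editor's note: illustrative sketch, not part of the original util.py.
# timedcm can also be used directly, without the @timed decorator:
#
#     with timedcm(b'rebuilding %s', b'index') as stats:
#         pass   # ... timed work goes here ...
#     # bytes(stats) renders the elapsed time, e.g. b'12.3 ms'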
3038 3037
3039 3038 _sizeunits = (
3040 3039 (b'm', 2 ** 20),
3041 3040 (b'k', 2 ** 10),
3042 3041 (b'g', 2 ** 30),
3043 3042 (b'kb', 2 ** 10),
3044 3043 (b'mb', 2 ** 20),
3045 3044 (b'gb', 2 ** 30),
3046 3045 (b'b', 1),
3047 3046 )
3048 3047
3049 3048
3050 3049 def sizetoint(s):
3051 3050 # type: (bytes) -> int
3052 3051 """Convert a space specifier to a byte count.
3053 3052
3054 3053 >>> sizetoint(b'30')
3055 3054 30
3056 3055 >>> sizetoint(b'2.2kb')
3057 3056 2252
3058 3057 >>> sizetoint(b'6M')
3059 3058 6291456
3060 3059 """
3061 3060 t = s.strip().lower()
3062 3061 try:
3063 3062 for k, u in _sizeunits:
3064 3063 if t.endswith(k):
3065 3064 return int(float(t[: -len(k)]) * u)
3066 3065 return int(t)
3067 3066 except ValueError:
3068 3067 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3069 3068
3070 3069
3071 3070 class hooks:
3072 3071 """A collection of hook functions that can be used to extend a
3073 3072 function's behavior. Hooks are called in lexicographic order,
3074 3073 based on the names of their sources."""
3075 3074
3076 3075 def __init__(self):
3077 3076 self._hooks = []
3078 3077
3079 3078 def add(self, source, hook):
3080 3079 self._hooks.append((source, hook))
3081 3080
3082 3081 def __call__(self, *args):
3083 3082 self._hooks.sort(key=lambda x: x[0])
3084 3083 results = []
3085 3084 for source, hook in self._hooks:
3086 3085 results.append(hook(*args))
3087 3086 return results
3088 3087
3089 3088
3090 3089 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3091 3090 """Yields lines for a nicely formatted stacktrace.
3092 3091 Skips the 'skip' last entries, then returns the last 'depth' entries.
3093 3092 Each file+linenumber is formatted according to fileline.
3094 3093 Each line is formatted according to line.
3095 3094 If line is None, it yields:
3096 3095 length of longest filepath+line number,
3097 3096 filepath+linenumber,
3098 3097 function
3099 3098
3100 3099 Not to be used in production code, but very convenient while developing.
3101 3100 """
3102 3101 entries = [
3103 3102 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3104 3103 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3105 3104 ][-depth:]
3106 3105 if entries:
3107 3106 fnmax = max(len(entry[0]) for entry in entries)
3108 3107 for fnln, func in entries:
3109 3108 if line is None:
3110 3109 yield (fnmax, fnln, func)
3111 3110 else:
3112 3111 yield line % (fnmax, fnln, func)
3113 3112
3114 3113
3115 3114 def debugstacktrace(
3116 3115 msg=b'stacktrace',
3117 3116 skip=0,
3118 3117 f=procutil.stderr,
3119 3118 otherf=procutil.stdout,
3120 3119 depth=0,
3121 3120 prefix=b'',
3122 3121 ):
3123 3122 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3124 3123 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3125 3124 By default it will flush stdout first.
3126 3125 It can be used everywhere and intentionally does not require an ui object.
3127 3126 Not to be used in production code, but very convenient while developing.
3128 3127 """
3129 3128 if otherf:
3130 3129 otherf.flush()
3131 3130 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3132 3131 for line in getstackframes(skip + 1, depth=depth):
3133 3132 f.write(prefix + line)
3134 3133 f.flush()
3135 3134
3136 3135
3137 3136 # convenient shortcut
3138 3137 dst = debugstacktrace
3139 3138
3140 3139
3141 3140 def safename(f, tag, ctx, others=None):
3142 3141 """
3143 3142 Generate a name that is safe to rename f to in the given context.
3144 3143
3145 3144 f: filename to rename
3146 3145 tag: a string tag that will be included in the new name
3147 3146 ctx: a context, in which the new name must not exist
3148 3147 others: a set of other filenames that the new name must not be in
3149 3148
3150 3149 Returns a file name of the form oldname~tag[~number] which does not exist
3151 3150 in the provided context and is not in the set of other names.
3152 3151 """
3153 3152 if others is None:
3154 3153 others = set()
3155 3154
3156 3155 fn = b'%s~%s' % (f, tag)
3157 3156 if fn not in ctx and fn not in others:
3158 3157 return fn
3159 3158 for n in itertools.count(1):
3160 3159 fn = b'%s~%s~%s' % (f, tag, n)
3161 3160 if fn not in ctx and fn not in others:
3162 3161 return fn
3163 3162
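# Editor's note: illustrative sketch, not part of the original util.py,
# assuming a context `ctx` (e.g. a changectx) in which neither candidate
# name exists:
#
#     safename(b'foo', b'rebase', ctx)                    # -> b'foo~rebase'
#     safename(b'foo', b'rebase', ctx, {b'foo~rebase'})   # -> b'foo~rebase~1'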
3164 3163
3165 3164 def readexactly(stream, n):
3166 3165 '''read n bytes from stream.read and abort if less was available'''
3167 3166 s = stream.read(n)
3168 3167 if len(s) < n:
3169 3168 raise error.Abort(
3170 3169 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3171 3170 % (len(s), n)
3172 3171 )
3173 3172 return s
3174 3173
3175 3174
3176 3175 def uvarintencode(value):
3177 3176 """Encode an unsigned integer value to a varint.
3178 3177
3179 3178 A varint is a variable length integer of 1 or more bytes. Each byte
3180 3179 except the last has the most significant bit set. The lower 7 bits of
3181 3180 each byte store the unsigned binary representation, least significant
3182 3181 group first.
3183 3182
3184 3183 >>> uvarintencode(0)
3185 3184 '\\x00'
3186 3185 >>> uvarintencode(1)
3187 3186 '\\x01'
3188 3187 >>> uvarintencode(127)
3189 3188 '\\x7f'
3190 3189 >>> uvarintencode(1337)
3191 3190 '\\xb9\\n'
3192 3191 >>> uvarintencode(65536)
3193 3192 '\\x80\\x80\\x04'
3194 3193 >>> uvarintencode(-1)
3195 3194 Traceback (most recent call last):
3196 3195 ...
3197 3196 ProgrammingError: negative value for uvarint: -1
3198 3197 """
3199 3198 if value < 0:
3200 3199 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3201 3200 bits = value & 0x7F
3202 3201 value >>= 7
3203 3202 bytes = []
3204 3203 while value:
3205 3204 bytes.append(pycompat.bytechr(0x80 | bits))
3206 3205 bits = value & 0x7F
3207 3206 value >>= 7
3208 3207 bytes.append(pycompat.bytechr(bits))
3209 3208
3210 3209 return b''.join(bytes)
3211 3210
3212 3211
3213 3212 def uvarintdecodestream(fh):
3214 3213 """Decode an unsigned variable length integer from a stream.
3215 3214
3216 3215 The passed argument is anything that has a ``.read(N)`` method.
3217 3216
3218 3217 >>> try:
3219 3218 ... from StringIO import StringIO as BytesIO
3220 3219 ... except ImportError:
3221 3220 ... from io import BytesIO
3222 3221 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3223 3222 0
3224 3223 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3225 3224 1
3226 3225 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3227 3226 127
3228 3227 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3229 3228 1337
3230 3229 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3231 3230 65536
3232 3231 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3233 3232 Traceback (most recent call last):
3234 3233 ...
3235 3234 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3236 3235 """
3237 3236 result = 0
3238 3237 shift = 0
3239 3238 while True:
3240 3239 byte = ord(readexactly(fh, 1))
3241 3240 result |= (byte & 0x7F) << shift
3242 3241 if not (byte & 0x80):
3243 3242 return result
3244 3243 shift += 7
3245 3244
3246 3245
3247 3246 # Passing the '' locale means that the locale should be set according to the
3248 3247 # user settings (environment variables).
3249 3248 # Python sometimes avoids setting the global locale settings. When interfacing
3250 3249 # with C code (e.g. the curses module or the Subversion bindings), the global
3251 3250 # locale settings must be initialized correctly. Python 2 does not initialize
3252 3251 # the global locale settings on interpreter startup. Python 3 sometimes
3253 3252 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3254 3253 # explicitly initialize it to get consistent behavior if it's not already
3255 3254 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3256 3255 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3257 3256 # if we can remove this code.
3258 3257 @contextlib.contextmanager
3259 3258 def with_lc_ctype():
3260 3259 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3261 3260 if oldloc == 'C':
3262 3261 try:
3263 3262 try:
3264 3263 locale.setlocale(locale.LC_CTYPE, '')
3265 3264 except locale.Error:
3266 3265 # The likely case is that the locale from the environment
3267 3266 # variables is unknown.
3268 3267 pass
3269 3268 yield
3270 3269 finally:
3271 3270 locale.setlocale(locale.LC_CTYPE, oldloc)
3272 3271 else:
3273 3272 yield
3274 3273
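# Editor's note: illustrative sketch, not part of the original util.py.
# Wrap C-level work that needs the user's LC_CTYPE (e.g. curses setup):
#
#     with with_lc_ctype():
#         pass   # ... call into locale-sensitive C code here ...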
3275 3274
3276 3275 def _estimatememory():
3277 3276 # type: () -> Optional[int]
3278 3277 """Provide an estimate for the available system memory in Bytes.
3279 3278
3280 3279 If no estimate can be provided on the platform, returns None.
3281 3280 """
3282 3281 if pycompat.sysplatform.startswith(b'win'):
3283 3282 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3284 3283 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3285 3284 from ctypes import ( # pytype: disable=import-error
3286 3285 Structure,
3287 3286 byref,
3288 3287 sizeof,
3289 3288 windll,
3290 3289 )
3291 3290
3292 3291 class MEMORYSTATUSEX(Structure):
3293 3292 _fields_ = [
3294 3293 ('dwLength', DWORD),
3295 3294 ('dwMemoryLoad', DWORD),
3296 3295 ('ullTotalPhys', DWORDLONG),
3297 3296 ('ullAvailPhys', DWORDLONG),
3298 3297 ('ullTotalPageFile', DWORDLONG),
3299 3298 ('ullAvailPageFile', DWORDLONG),
3300 3299 ('ullTotalVirtual', DWORDLONG),
3301 3300 ('ullAvailVirtual', DWORDLONG),
3302 3301 ('ullExtendedVirtual', DWORDLONG),
3303 3302 ]
3304 3303
3305 3304 x = MEMORYSTATUSEX()
3306 3305 x.dwLength = sizeof(x)
3307 3306 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3308 3307 return x.ullAvailPhys
3309 3308
3310 3309 # On newer Unix-like systems and Mac OSX, the sysconf interface
3311 3310 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3312 3311 # seems to be implemented on most systems.
3313 3312 try:
3314 3313 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3315 3314 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3316 3315 return pagesize * pages
3317 3316 except OSError: # sysconf can fail
3318 3317 pass
3319 3318 except KeyError: # unknown parameter
3320 3319 pass