copyfiles: deal with existing file when hardlinking...
marmoute - r48210:9ea52521 default
@@ -1,3394 +1,3396
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import collections
20 20 import contextlib
21 21 import errno
22 22 import gc
23 23 import hashlib
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .node import hex
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
46 46 from hgdemandimport import tracing
47 47 from . import (
48 48 encoding,
49 49 error,
50 50 i18n,
51 51 policy,
52 52 pycompat,
53 53 urllibcompat,
54 54 )
55 55 from .utils import (
56 56 compression,
57 57 hashutil,
58 58 procutil,
59 59 stringutil,
60 60 urlutil,
61 61 )
62 62
63 63 if pycompat.TYPE_CHECKING:
64 64 from typing import (
65 65 Iterator,
66 66 List,
67 67 Optional,
68 68 Tuple,
69 69 )
70 70
71 71
72 72 base85 = policy.importmod('base85')
73 73 osutil = policy.importmod('osutil')
74 74
75 75 b85decode = base85.b85decode
76 76 b85encode = base85.b85encode
77 77
78 78 cookielib = pycompat.cookielib
79 79 httplib = pycompat.httplib
80 80 pickle = pycompat.pickle
81 81 safehasattr = pycompat.safehasattr
82 82 socketserver = pycompat.socketserver
83 83 bytesio = pycompat.bytesio
84 84 # TODO deprecate stringio name, as it is a lie on Python 3.
85 85 stringio = bytesio
86 86 xmlrpclib = pycompat.xmlrpclib
87 87
88 88 httpserver = urllibcompat.httpserver
89 89 urlerr = urllibcompat.urlerr
90 90 urlreq = urllibcompat.urlreq
91 91
92 92 # workaround for win32mbcs
93 93 _filenamebytestr = pycompat.bytestr
94 94
95 95 if pycompat.iswindows:
96 96 from . import windows as platform
97 97 else:
98 98 from . import posix as platform
99 99
100 100 _ = i18n._
101 101
102 102 bindunixsocket = platform.bindunixsocket
103 103 cachestat = platform.cachestat
104 104 checkexec = platform.checkexec
105 105 checklink = platform.checklink
106 106 copymode = platform.copymode
107 107 expandglobs = platform.expandglobs
108 108 getfsmountpoint = platform.getfsmountpoint
109 109 getfstype = platform.getfstype
110 110 get_password = platform.get_password
111 111 groupmembers = platform.groupmembers
112 112 groupname = platform.groupname
113 113 isexec = platform.isexec
114 114 isowner = platform.isowner
115 115 listdir = osutil.listdir
116 116 localpath = platform.localpath
117 117 lookupreg = platform.lookupreg
118 118 makedir = platform.makedir
119 119 nlinks = platform.nlinks
120 120 normpath = platform.normpath
121 121 normcase = platform.normcase
122 122 normcasespec = platform.normcasespec
123 123 normcasefallback = platform.normcasefallback
124 124 openhardlinks = platform.openhardlinks
125 125 oslink = platform.oslink
126 126 parsepatchoutput = platform.parsepatchoutput
127 127 pconvert = platform.pconvert
128 128 poll = platform.poll
129 129 posixfile = platform.posixfile
130 130 readlink = platform.readlink
131 131 rename = platform.rename
132 132 removedirs = platform.removedirs
133 133 samedevice = platform.samedevice
134 134 samefile = platform.samefile
135 135 samestat = platform.samestat
136 136 setflags = platform.setflags
137 137 split = platform.split
138 138 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
139 139 statisexec = platform.statisexec
140 140 statislink = platform.statislink
141 141 umask = platform.umask
142 142 unlink = platform.unlink
143 143 username = platform.username
144 144
145 145
146 146 def setumask(val):
147 147 # type: (int) -> None
148 148 '''updates the umask. used by chg server'''
149 149 if pycompat.iswindows:
150 150 return
151 151 os.umask(val)
152 152 global umask
153 153 platform.umask = umask = val & 0o777
154 154
155 155
156 156 # small compat layer
157 157 compengines = compression.compengines
158 158 SERVERROLE = compression.SERVERROLE
159 159 CLIENTROLE = compression.CLIENTROLE
160 160
161 161 try:
162 162 recvfds = osutil.recvfds
163 163 except AttributeError:
164 164 pass
165 165
166 166 # Python compatibility
167 167
168 168 _notset = object()
169 169
170 170
171 171 def bitsfrom(container):
172 172 bits = 0
173 173 for bit in container:
174 174 bits |= bit
175 175 return bits
176 176
177 177
178 178 # python 2.6 still has deprecation warnings enabled by default. We do not want
179 179 # to display anything to the standard user, so detect if we are running tests
180 180 # and only use python deprecation warnings in this case.
181 181 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
182 182 if _dowarn:
183 183 # explicitly unfilter our warning for python 2.7
184 184 #
185 185 # The option of setting PYTHONWARNINGS in the test runner was investigated.
186 186 # However, the module name set through PYTHONWARNINGS was matched exactly, so
187 187 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
188 188 # makes the whole PYTHONWARNINGS approach useless for our use case.
189 189 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
190 190 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
191 191 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
192 192 if _dowarn and pycompat.ispy3:
193 193 # silence warning emitted by passing user string to re.sub()
194 194 warnings.filterwarnings(
195 195 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
196 196 )
197 197 warnings.filterwarnings(
198 198 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
199 199 )
200 200 # TODO: reinvent imp.is_frozen()
201 201 warnings.filterwarnings(
202 202 'ignore',
203 203 'the imp module is deprecated',
204 204 DeprecationWarning,
205 205 'mercurial',
206 206 )
207 207
208 208
209 209 def nouideprecwarn(msg, version, stacklevel=1):
210 210 """Issue an python native deprecation warning
211 211
212 212 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
213 213 """
214 214 if _dowarn:
215 215 msg += (
216 216 b"\n(compatibility will be dropped after Mercurial-%s,"
217 217 b" update your code.)"
218 218 ) % version
219 219 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
220 220 # on python 3 with chg, we will need to explicitly flush the output
221 221 sys.stderr.flush()
222 222
223 223
224 224 DIGESTS = {
225 225 b'md5': hashlib.md5,
226 226 b'sha1': hashutil.sha1,
227 227 b'sha512': hashlib.sha512,
228 228 }
229 229 # List of digest types from strongest to weakest
230 230 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
231 231
232 232 for k in DIGESTS_BY_STRENGTH:
233 233 assert k in DIGESTS
234 234
235 235
236 236 class digester(object):
237 237 """helper to compute digests.
238 238
239 239 This helper can be used to compute one or more digests given their name.
240 240
241 241 >>> d = digester([b'md5', b'sha1'])
242 242 >>> d.update(b'foo')
243 243 >>> [k for k in sorted(d)]
244 244 ['md5', 'sha1']
245 245 >>> d[b'md5']
246 246 'acbd18db4cc2f85cedef654fccc4a4d8'
247 247 >>> d[b'sha1']
248 248 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
249 249 >>> digester.preferred([b'md5', b'sha1'])
250 250 'sha1'
251 251 """
252 252
253 253 def __init__(self, digests, s=b''):
254 254 self._hashes = {}
255 255 for k in digests:
256 256 if k not in DIGESTS:
257 257 raise error.Abort(_(b'unknown digest type: %s') % k)
258 258 self._hashes[k] = DIGESTS[k]()
259 259 if s:
260 260 self.update(s)
261 261
262 262 def update(self, data):
263 263 for h in self._hashes.values():
264 264 h.update(data)
265 265
266 266 def __getitem__(self, key):
267 267 if key not in DIGESTS:
268 268 raise error.Abort(_(b'unknown digest type: %s') % key)
269 269 return hex(self._hashes[key].digest())
270 270
271 271 def __iter__(self):
272 272 return iter(self._hashes)
273 273
274 274 @staticmethod
275 275 def preferred(supported):
276 276 """returns the strongest digest type in both supported and DIGESTS."""
277 277
278 278 for k in DIGESTS_BY_STRENGTH:
279 279 if k in supported:
280 280 return k
281 281 return None
282 282
283 283
284 284 class digestchecker(object):
285 285 """file handle wrapper that additionally checks content against a given
286 286 size and digests.
287 287
288 288 d = digestchecker(fh, size, {'md5': '...'})
289 289
290 290 When multiple digests are given, all of them are validated.
291 291 """
292 292
293 293 def __init__(self, fh, size, digests):
294 294 self._fh = fh
295 295 self._size = size
296 296 self._got = 0
297 297 self._digests = dict(digests)
298 298 self._digester = digester(self._digests.keys())
299 299
300 300 def read(self, length=-1):
301 301 content = self._fh.read(length)
302 302 self._digester.update(content)
303 303 self._got += len(content)
304 304 return content
305 305
306 306 def validate(self):
307 307 if self._size != self._got:
308 308 raise error.Abort(
309 309 _(b'size mismatch: expected %d, got %d')
310 310 % (self._size, self._got)
311 311 )
312 312 for k, v in self._digests.items():
313 313 if v != self._digester[k]:
314 314 # i18n: first parameter is a digest name
315 315 raise error.Abort(
316 316 _(b'%s mismatch: expected %s, got %s')
317 317 % (k, v, self._digester[k])
318 318 )
319 319
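# Example (editor's sketch, not part of the original module): validating a
# stream against a known size and digest; the sha1 hex below is the digest
# of b'foo', as in the digester doctest above.
#
#   >>> import io
#   >>> fh = io.BytesIO(b'foo')
#   >>> d = digestchecker(fh, 3, {b'sha1': b'0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'})
#   >>> d.read() == b'foo'
#   True
#   >>> d.validate()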
320 320
321 321 try:
322 322 buffer = buffer # pytype: disable=name-error
323 323 except NameError:
324 324
325 325 def buffer(sliceable, offset=0, length=None):
326 326 if length is not None:
327 327 return memoryview(sliceable)[offset : offset + length]
328 328 return memoryview(sliceable)[offset:]
329 329
330 330
331 331 _chunksize = 4096
332 332
333 333
334 334 class bufferedinputpipe(object):
335 335 """a manually buffered input pipe
336 336
337 337 Python will not let us use buffered IO and lazy reading with 'polling' at
338 338 the same time. We cannot probe the buffer state and select will not detect
339 339 that data are ready to read if they are already buffered.
340 340
341 341 This class lets us work around that by implementing its own buffering
342 342 (allowing efficient readline) while offering a way to know if the buffer is
343 343 empty from the output (allowing collaboration of the buffer with polling).
344 344
345 345 This class lives in the 'util' module because it makes use of the 'os'
346 346 module from the python stdlib.
347 347 """
348 348
349 349 def __new__(cls, fh):
350 350 # If we receive a fileobjectproxy, we need to use a variation of this
351 351 # class that notifies observers about activity.
352 352 if isinstance(fh, fileobjectproxy):
353 353 cls = observedbufferedinputpipe
354 354
355 355 return super(bufferedinputpipe, cls).__new__(cls)
356 356
357 357 def __init__(self, input):
358 358 self._input = input
359 359 self._buffer = []
360 360 self._eof = False
361 361 self._lenbuf = 0
362 362
363 363 @property
364 364 def hasbuffer(self):
365 365 """True is any data is currently buffered
366 366
367 367 This will be used externally a pre-step for polling IO. If there is
368 368 already data then no polling should be set in place."""
369 369 return bool(self._buffer)
370 370
371 371 @property
372 372 def closed(self):
373 373 return self._input.closed
374 374
375 375 def fileno(self):
376 376 return self._input.fileno()
377 377
378 378 def close(self):
379 379 return self._input.close()
380 380
381 381 def read(self, size):
382 382 while (not self._eof) and (self._lenbuf < size):
383 383 self._fillbuffer()
384 384 return self._frombuffer(size)
385 385
386 386 def unbufferedread(self, size):
387 387 if not self._eof and self._lenbuf == 0:
388 388 self._fillbuffer(max(size, _chunksize))
389 389 return self._frombuffer(min(self._lenbuf, size))
390 390
391 391 def readline(self, *args, **kwargs):
392 392 if len(self._buffer) > 1:
393 393 # this should not happen because both read and readline end with a
394 394 # _frombuffer call that collapses it.
395 395 self._buffer = [b''.join(self._buffer)]
396 396 self._lenbuf = len(self._buffer[0])
397 397 lfi = -1
398 398 if self._buffer:
399 399 lfi = self._buffer[-1].find(b'\n')
400 400 while (not self._eof) and lfi < 0:
401 401 self._fillbuffer()
402 402 if self._buffer:
403 403 lfi = self._buffer[-1].find(b'\n')
404 404 size = lfi + 1
405 405 if lfi < 0: # end of file
406 406 size = self._lenbuf
407 407 elif len(self._buffer) > 1:
408 408 # we need to take previous chunks into account
409 409 size += self._lenbuf - len(self._buffer[-1])
410 410 return self._frombuffer(size)
411 411
412 412 def _frombuffer(self, size):
413 413 """return at most 'size' data from the buffer
414 414
415 415 The data are removed from the buffer."""
416 416 if size == 0 or not self._buffer:
417 417 return b''
418 418 buf = self._buffer[0]
419 419 if len(self._buffer) > 1:
420 420 buf = b''.join(self._buffer)
421 421
422 422 data = buf[:size]
423 423 buf = buf[len(data) :]
424 424 if buf:
425 425 self._buffer = [buf]
426 426 self._lenbuf = len(buf)
427 427 else:
428 428 self._buffer = []
429 429 self._lenbuf = 0
430 430 return data
431 431
432 432 def _fillbuffer(self, size=_chunksize):
433 433 """read data to the buffer"""
434 434 data = os.read(self._input.fileno(), size)
435 435 if not data:
436 436 self._eof = True
437 437 else:
438 438 self._lenbuf += len(data)
439 439 self._buffer.append(data)
440 440
441 441 return data
442 442
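# Example (editor's sketch, not part of the original module): wrapping the
# read end of an os.pipe() so that readline() can be combined with the
# 'hasbuffer' probe a poll-based caller would consult first.
#
#   r, w = os.pipe()
#   os.write(w, b'first\nsecond\n')
#   os.close(w)
#   p = bufferedinputpipe(os.fdopen(r, 'rb'))
#   p.readline()   # -> b'first\n'
#   p.hasbuffer    # -> True; b'second\n' is already buffered, skip polling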
443 443
444 444 def mmapread(fp, size=None):
445 445 if size == 0:
446 446 # size of 0 to mmap.mmap() means "all data"
447 447 # rather than "zero bytes", so special case that.
448 448 return b''
449 449 elif size is None:
450 450 size = 0
451 451 try:
452 452 fd = getattr(fp, 'fileno', lambda: fp)()
453 453 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
454 454 except ValueError:
455 455 # Empty files cannot be mmapped, but mmapread should still work. Check
456 456 # if the file is empty, and if so, return an empty buffer.
457 457 if os.fstat(fd).st_size == 0:
458 458 return b''
459 459 raise
460 460
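# Example (editor's sketch, not part of the original module): mmapread()
# accepts anything with a fileno() (or a raw descriptor) and returns an
# mmap object that can be sliced like bytes; 'somepath' is hypothetical.
#
#   with open(somepath, 'rb') as fp:
#       data = mmapread(fp)
#       header = data[:4]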
461 461
462 462 class fileobjectproxy(object):
463 463 """A proxy around file objects that tells a watcher when events occur.
464 464
465 465 This type is intended to only be used for testing purposes. Think hard
466 466 before using it in important code.
467 467 """
468 468
469 469 __slots__ = (
470 470 '_orig',
471 471 '_observer',
472 472 )
473 473
474 474 def __init__(self, fh, observer):
475 475 object.__setattr__(self, '_orig', fh)
476 476 object.__setattr__(self, '_observer', observer)
477 477
478 478 def __getattribute__(self, name):
479 479 ours = {
480 480 '_observer',
481 481 # IOBase
482 482 'close',
483 483 # closed if a property
484 484 'fileno',
485 485 'flush',
486 486 'isatty',
487 487 'readable',
488 488 'readline',
489 489 'readlines',
490 490 'seek',
491 491 'seekable',
492 492 'tell',
493 493 'truncate',
494 494 'writable',
495 495 'writelines',
496 496 # RawIOBase
497 497 'read',
498 498 'readall',
499 499 'readinto',
500 500 'write',
501 501 # BufferedIOBase
502 502 # raw is a property
503 503 'detach',
504 504 # read defined above
505 505 'read1',
506 506 # readinto defined above
507 507 # write defined above
508 508 }
509 509
510 510 # We only observe some methods.
511 511 if name in ours:
512 512 return object.__getattribute__(self, name)
513 513
514 514 return getattr(object.__getattribute__(self, '_orig'), name)
515 515
516 516 def __nonzero__(self):
517 517 return bool(object.__getattribute__(self, '_orig'))
518 518
519 519 __bool__ = __nonzero__
520 520
521 521 def __delattr__(self, name):
522 522 return delattr(object.__getattribute__(self, '_orig'), name)
523 523
524 524 def __setattr__(self, name, value):
525 525 return setattr(object.__getattribute__(self, '_orig'), name, value)
526 526
527 527 def __iter__(self):
528 528 return object.__getattribute__(self, '_orig').__iter__()
529 529
530 530 def _observedcall(self, name, *args, **kwargs):
531 531 # Call the original object.
532 532 orig = object.__getattribute__(self, '_orig')
533 533 res = getattr(orig, name)(*args, **kwargs)
534 534
535 535 # Call a method on the observer of the same name with arguments
536 536 # so it can react, log, etc.
537 537 observer = object.__getattribute__(self, '_observer')
538 538 fn = getattr(observer, name, None)
539 539 if fn:
540 540 fn(res, *args, **kwargs)
541 541
542 542 return res
543 543
544 544 def close(self, *args, **kwargs):
545 545 return object.__getattribute__(self, '_observedcall')(
546 546 'close', *args, **kwargs
547 547 )
548 548
549 549 def fileno(self, *args, **kwargs):
550 550 return object.__getattribute__(self, '_observedcall')(
551 551 'fileno', *args, **kwargs
552 552 )
553 553
554 554 def flush(self, *args, **kwargs):
555 555 return object.__getattribute__(self, '_observedcall')(
556 556 'flush', *args, **kwargs
557 557 )
558 558
559 559 def isatty(self, *args, **kwargs):
560 560 return object.__getattribute__(self, '_observedcall')(
561 561 'isatty', *args, **kwargs
562 562 )
563 563
564 564 def readable(self, *args, **kwargs):
565 565 return object.__getattribute__(self, '_observedcall')(
566 566 'readable', *args, **kwargs
567 567 )
568 568
569 569 def readline(self, *args, **kwargs):
570 570 return object.__getattribute__(self, '_observedcall')(
571 571 'readline', *args, **kwargs
572 572 )
573 573
574 574 def readlines(self, *args, **kwargs):
575 575 return object.__getattribute__(self, '_observedcall')(
576 576 'readlines', *args, **kwargs
577 577 )
578 578
579 579 def seek(self, *args, **kwargs):
580 580 return object.__getattribute__(self, '_observedcall')(
581 581 'seek', *args, **kwargs
582 582 )
583 583
584 584 def seekable(self, *args, **kwargs):
585 585 return object.__getattribute__(self, '_observedcall')(
586 586 'seekable', *args, **kwargs
587 587 )
588 588
589 589 def tell(self, *args, **kwargs):
590 590 return object.__getattribute__(self, '_observedcall')(
591 591 'tell', *args, **kwargs
592 592 )
593 593
594 594 def truncate(self, *args, **kwargs):
595 595 return object.__getattribute__(self, '_observedcall')(
596 596 'truncate', *args, **kwargs
597 597 )
598 598
599 599 def writable(self, *args, **kwargs):
600 600 return object.__getattribute__(self, '_observedcall')(
601 601 'writable', *args, **kwargs
602 602 )
603 603
604 604 def writelines(self, *args, **kwargs):
605 605 return object.__getattribute__(self, '_observedcall')(
606 606 'writelines', *args, **kwargs
607 607 )
608 608
609 609 def read(self, *args, **kwargs):
610 610 return object.__getattribute__(self, '_observedcall')(
611 611 'read', *args, **kwargs
612 612 )
613 613
614 614 def readall(self, *args, **kwargs):
615 615 return object.__getattribute__(self, '_observedcall')(
616 616 'readall', *args, **kwargs
617 617 )
618 618
619 619 def readinto(self, *args, **kwargs):
620 620 return object.__getattribute__(self, '_observedcall')(
621 621 'readinto', *args, **kwargs
622 622 )
623 623
624 624 def write(self, *args, **kwargs):
625 625 return object.__getattribute__(self, '_observedcall')(
626 626 'write', *args, **kwargs
627 627 )
628 628
629 629 def detach(self, *args, **kwargs):
630 630 return object.__getattribute__(self, '_observedcall')(
631 631 'detach', *args, **kwargs
632 632 )
633 633
634 634 def read1(self, *args, **kwargs):
635 635 return object.__getattribute__(self, '_observedcall')(
636 636 'read1', *args, **kwargs
637 637 )
638 638
639 639
640 640 class observedbufferedinputpipe(bufferedinputpipe):
641 641 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
642 642
643 643 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
644 644 bypass ``fileobjectproxy``. Because of this, we need to make
645 645 ``bufferedinputpipe`` aware of these operations.
646 646
647 647 This variation of ``bufferedinputpipe`` can notify observers about
648 648 ``os.read()`` events. It also re-publishes other events, such as
649 649 ``read()`` and ``readline()``.
650 650 """
651 651
652 652 def _fillbuffer(self, size=_chunksize):
653 653 res = super(observedbufferedinputpipe, self)._fillbuffer(size)
654 654
655 655 fn = getattr(self._input._observer, 'osread', None)
656 656 if fn:
657 657 fn(res, size)
658 658
659 659 return res
660 660
661 661 # We use different observer methods because the operation isn't
662 662 # performed on the actual file object but on us.
663 663 def read(self, size):
664 664 res = super(observedbufferedinputpipe, self).read(size)
665 665
666 666 fn = getattr(self._input._observer, 'bufferedread', None)
667 667 if fn:
668 668 fn(res, size)
669 669
670 670 return res
671 671
672 672 def readline(self, *args, **kwargs):
673 673 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
674 674
675 675 fn = getattr(self._input._observer, 'bufferedreadline', None)
676 676 if fn:
677 677 fn(res)
678 678
679 679 return res
680 680
681 681
682 682 PROXIED_SOCKET_METHODS = {
683 683 'makefile',
684 684 'recv',
685 685 'recvfrom',
686 686 'recvfrom_into',
687 687 'recv_into',
688 688 'send',
689 689 'sendall',
690 690 'sendto',
691 691 'setblocking',
692 692 'settimeout',
693 693 'gettimeout',
694 694 'setsockopt',
695 695 }
696 696
697 697
698 698 class socketproxy(object):
699 699 """A proxy around a socket that tells a watcher when events occur.
700 700
701 701 This is like ``fileobjectproxy`` except for sockets.
702 702
703 703 This type is intended to only be used for testing purposes. Think hard
704 704 before using it in important code.
705 705 """
706 706
707 707 __slots__ = (
708 708 '_orig',
709 709 '_observer',
710 710 )
711 711
712 712 def __init__(self, sock, observer):
713 713 object.__setattr__(self, '_orig', sock)
714 714 object.__setattr__(self, '_observer', observer)
715 715
716 716 def __getattribute__(self, name):
717 717 if name in PROXIED_SOCKET_METHODS:
718 718 return object.__getattribute__(self, name)
719 719
720 720 return getattr(object.__getattribute__(self, '_orig'), name)
721 721
722 722 def __delattr__(self, name):
723 723 return delattr(object.__getattribute__(self, '_orig'), name)
724 724
725 725 def __setattr__(self, name, value):
726 726 return setattr(object.__getattribute__(self, '_orig'), name, value)
727 727
728 728 def __nonzero__(self):
729 729 return bool(object.__getattribute__(self, '_orig'))
730 730
731 731 __bool__ = __nonzero__
732 732
733 733 def _observedcall(self, name, *args, **kwargs):
734 734 # Call the original object.
735 735 orig = object.__getattribute__(self, '_orig')
736 736 res = getattr(orig, name)(*args, **kwargs)
737 737
738 738 # Call a method on the observer of the same name with arguments
739 739 # so it can react, log, etc.
740 740 observer = object.__getattribute__(self, '_observer')
741 741 fn = getattr(observer, name, None)
742 742 if fn:
743 743 fn(res, *args, **kwargs)
744 744
745 745 return res
746 746
747 747 def makefile(self, *args, **kwargs):
748 748 res = object.__getattribute__(self, '_observedcall')(
749 749 'makefile', *args, **kwargs
750 750 )
751 751
752 752 # The file object may be used for I/O. So we turn it into a
753 753 # proxy using our observer.
754 754 observer = object.__getattribute__(self, '_observer')
755 755 return makeloggingfileobject(
756 756 observer.fh,
757 757 res,
758 758 observer.name,
759 759 reads=observer.reads,
760 760 writes=observer.writes,
761 761 logdata=observer.logdata,
762 762 logdataapis=observer.logdataapis,
763 763 )
764 764
765 765 def recv(self, *args, **kwargs):
766 766 return object.__getattribute__(self, '_observedcall')(
767 767 'recv', *args, **kwargs
768 768 )
769 769
770 770 def recvfrom(self, *args, **kwargs):
771 771 return object.__getattribute__(self, '_observedcall')(
772 772 'recvfrom', *args, **kwargs
773 773 )
774 774
775 775 def recvfrom_into(self, *args, **kwargs):
776 776 return object.__getattribute__(self, '_observedcall')(
777 777 'recvfrom_into', *args, **kwargs
778 778 )
779 779
780 780 def recv_into(self, *args, **kwargs):
781 781 return object.__getattribute__(self, '_observedcall')(
782 782 'recv_into', *args, **kwargs
783 783 )
784 784
785 785 def send(self, *args, **kwargs):
786 786 return object.__getattribute__(self, '_observedcall')(
787 787 'send', *args, **kwargs
788 788 )
789 789
790 790 def sendall(self, *args, **kwargs):
791 791 return object.__getattribute__(self, '_observedcall')(
792 792 'sendall', *args, **kwargs
793 793 )
794 794
795 795 def sendto(self, *args, **kwargs):
796 796 return object.__getattribute__(self, '_observedcall')(
797 797 'sendto', *args, **kwargs
798 798 )
799 799
800 800 def setblocking(self, *args, **kwargs):
801 801 return object.__getattribute__(self, '_observedcall')(
802 802 'setblocking', *args, **kwargs
803 803 )
804 804
805 805 def settimeout(self, *args, **kwargs):
806 806 return object.__getattribute__(self, '_observedcall')(
807 807 'settimeout', *args, **kwargs
808 808 )
809 809
810 810 def gettimeout(self, *args, **kwargs):
811 811 return object.__getattribute__(self, '_observedcall')(
812 812 'gettimeout', *args, **kwargs
813 813 )
814 814
815 815 def setsockopt(self, *args, **kwargs):
816 816 return object.__getattribute__(self, '_observedcall')(
817 817 'setsockopt', *args, **kwargs
818 818 )
819 819
820 820
821 821 class baseproxyobserver(object):
822 822 def __init__(self, fh, name, logdata, logdataapis):
823 823 self.fh = fh
824 824 self.name = name
825 825 self.logdata = logdata
826 826 self.logdataapis = logdataapis
827 827
828 828 def _writedata(self, data):
829 829 if not self.logdata:
830 830 if self.logdataapis:
831 831 self.fh.write(b'\n')
832 832 self.fh.flush()
833 833 return
834 834
835 835 # Simple case writes all data on a single line.
836 836 if b'\n' not in data:
837 837 if self.logdataapis:
838 838 self.fh.write(b': %s\n' % stringutil.escapestr(data))
839 839 else:
840 840 self.fh.write(
841 841 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
842 842 )
843 843 self.fh.flush()
844 844 return
845 845
846 846 # Data with newlines is written to multiple lines.
847 847 if self.logdataapis:
848 848 self.fh.write(b':\n')
849 849
850 850 lines = data.splitlines(True)
851 851 for line in lines:
852 852 self.fh.write(
853 853 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
854 854 )
855 855 self.fh.flush()
856 856
857 857
858 858 class fileobjectobserver(baseproxyobserver):
859 859 """Logs file object activity."""
860 860
861 861 def __init__(
862 862 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
863 863 ):
864 864 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
865 865 self.reads = reads
866 866 self.writes = writes
867 867
868 868 def read(self, res, size=-1):
869 869 if not self.reads:
870 870 return
871 871 # Python 3 can return None from reads at EOF instead of empty strings.
872 872 if res is None:
873 873 res = b''
874 874
875 875 if size == -1 and res == b'':
876 876 # Suppress pointless read(-1) calls that return
877 877 # nothing. These happen _a lot_ on Python 3, and there
878 878 # doesn't seem to be a better workaround to have matching
879 879 # Python 2 and 3 behavior. :(
880 880 return
881 881
882 882 if self.logdataapis:
883 883 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
884 884
885 885 self._writedata(res)
886 886
887 887 def readline(self, res, limit=-1):
888 888 if not self.reads:
889 889 return
890 890
891 891 if self.logdataapis:
892 892 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
893 893
894 894 self._writedata(res)
895 895
896 896 def readinto(self, res, dest):
897 897 if not self.reads:
898 898 return
899 899
900 900 if self.logdataapis:
901 901 self.fh.write(
902 902 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
903 903 )
904 904
905 905 data = dest[0:res] if res is not None else b''
906 906
907 907 # _writedata() uses "in" operator and is confused by memoryview because
908 908 # characters are ints on Python 3.
909 909 if isinstance(data, memoryview):
910 910 data = data.tobytes()
911 911
912 912 self._writedata(data)
913 913
914 914 def write(self, res, data):
915 915 if not self.writes:
916 916 return
917 917
918 918 # Python 2 returns None from some write() calls. Python 3 (reasonably)
919 919 # returns the integer bytes written.
920 920 if res is None and data:
921 921 res = len(data)
922 922
923 923 if self.logdataapis:
924 924 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
925 925
926 926 self._writedata(data)
927 927
928 928 def flush(self, res):
929 929 if not self.writes:
930 930 return
931 931
932 932 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
933 933
934 934 # For observedbufferedinputpipe.
935 935 def bufferedread(self, res, size):
936 936 if not self.reads:
937 937 return
938 938
939 939 if self.logdataapis:
940 940 self.fh.write(
941 941 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
942 942 )
943 943
944 944 self._writedata(res)
945 945
946 946 def bufferedreadline(self, res):
947 947 if not self.reads:
948 948 return
949 949
950 950 if self.logdataapis:
951 951 self.fh.write(
952 952 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
953 953 )
954 954
955 955 self._writedata(res)
956 956
957 957
958 958 def makeloggingfileobject(
959 959 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
960 960 ):
961 961 """Turn a file object into a logging file object."""
962 962
963 963 observer = fileobjectobserver(
964 964 logh,
965 965 name,
966 966 reads=reads,
967 967 writes=writes,
968 968 logdata=logdata,
969 969 logdataapis=logdataapis,
970 970 )
971 971 return fileobjectproxy(fh, observer)
972 972
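# Example (editor's sketch, not part of the original module): observing reads
# on an in-memory file through the proxy machinery above.
#
#   import io
#   log = io.BytesIO()
#   fh = makeloggingfileobject(log, io.BytesIO(b'data'), b'src', logdata=True)
#   fh.read(4)
#   log.getvalue()   # -> b'src> read(4) -> 4: data\n'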
973 973
974 974 class socketobserver(baseproxyobserver):
975 975 """Logs socket activity."""
976 976
977 977 def __init__(
978 978 self,
979 979 fh,
980 980 name,
981 981 reads=True,
982 982 writes=True,
983 983 states=True,
984 984 logdata=False,
985 985 logdataapis=True,
986 986 ):
987 987 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
988 988 self.reads = reads
989 989 self.writes = writes
990 990 self.states = states
991 991
992 992 def makefile(self, res, mode=None, bufsize=None):
993 993 if not self.states:
994 994 return
995 995
996 996 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
997 997
998 998 def recv(self, res, size, flags=0):
999 999 if not self.reads:
1000 1000 return
1001 1001
1002 1002 if self.logdataapis:
1003 1003 self.fh.write(
1004 1004 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1005 1005 )
1006 1006 self._writedata(res)
1007 1007
1008 1008 def recvfrom(self, res, size, flags=0):
1009 1009 if not self.reads:
1010 1010 return
1011 1011
1012 1012 if self.logdataapis:
1013 1013 self.fh.write(
1014 1014 b'%s> recvfrom(%d, %d) -> %d'
1015 1015 % (self.name, size, flags, len(res[0]))
1016 1016 )
1017 1017
1018 1018 self._writedata(res[0])
1019 1019
1020 1020 def recvfrom_into(self, res, buf, size, flags=0):
1021 1021 if not self.reads:
1022 1022 return
1023 1023
1024 1024 if self.logdataapis:
1025 1025 self.fh.write(
1026 1026 b'%s> recvfrom_into(%d, %d) -> %d'
1027 1027 % (self.name, size, flags, res[0])
1028 1028 )
1029 1029
1030 1030 self._writedata(buf[0 : res[0]])
1031 1031
1032 1032 def recv_into(self, res, buf, size=0, flags=0):
1033 1033 if not self.reads:
1034 1034 return
1035 1035
1036 1036 if self.logdataapis:
1037 1037 self.fh.write(
1038 1038 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1039 1039 )
1040 1040
1041 1041 self._writedata(buf[0:res])
1042 1042
1043 1043 def send(self, res, data, flags=0):
1044 1044 if not self.writes:
1045 1045 return
1046 1046
1047 1047 self.fh.write(
1048 1048 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1049 1049 )
1050 1050 self._writedata(data)
1051 1051
1052 1052 def sendall(self, res, data, flags=0):
1053 1053 if not self.writes:
1054 1054 return
1055 1055
1056 1056 if self.logdataapis:
1057 1057 # Returns None on success. So don't bother reporting return value.
1058 1058 self.fh.write(
1059 1059 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1060 1060 )
1061 1061
1062 1062 self._writedata(data)
1063 1063
1064 1064 def sendto(self, res, data, flagsoraddress, address=None):
1065 1065 if not self.writes:
1066 1066 return
1067 1067
1068 1068 if address:
1069 1069 flags = flagsoraddress
1070 1070 else:
1071 1071 flags = 0
1072 1072
1073 1073 if self.logdataapis:
1074 1074 self.fh.write(
1075 1075 b'%s> sendto(%d, %d, %r) -> %d'
1076 1076 % (self.name, len(data), flags, address, res)
1077 1077 )
1078 1078
1079 1079 self._writedata(data)
1080 1080
1081 1081 def setblocking(self, res, flag):
1082 1082 if not self.states:
1083 1083 return
1084 1084
1085 1085 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1086 1086
1087 1087 def settimeout(self, res, value):
1088 1088 if not self.states:
1089 1089 return
1090 1090
1091 1091 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1092 1092
1093 1093 def gettimeout(self, res):
1094 1094 if not self.states:
1095 1095 return
1096 1096
1097 1097 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1098 1098
1099 1099 def setsockopt(self, res, level, optname, value):
1100 1100 if not self.states:
1101 1101 return
1102 1102
1103 1103 self.fh.write(
1104 1104 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1105 1105 % (self.name, level, optname, value, res)
1106 1106 )
1107 1107
1108 1108
1109 1109 def makeloggingsocket(
1110 1110 logh,
1111 1111 fh,
1112 1112 name,
1113 1113 reads=True,
1114 1114 writes=True,
1115 1115 states=True,
1116 1116 logdata=False,
1117 1117 logdataapis=True,
1118 1118 ):
1119 1119 """Turn a socket into a logging socket."""
1120 1120
1121 1121 observer = socketobserver(
1122 1122 logh,
1123 1123 name,
1124 1124 reads=reads,
1125 1125 writes=writes,
1126 1126 states=states,
1127 1127 logdata=logdata,
1128 1128 logdataapis=logdataapis,
1129 1129 )
1130 1130 return socketproxy(fh, observer)
1131 1131
1132 1132
1133 1133 def version():
1134 1134 """Return version information if available."""
1135 1135 try:
1136 1136 from . import __version__
1137 1137
1138 1138 return __version__.version
1139 1139 except ImportError:
1140 1140 return b'unknown'
1141 1141
1142 1142
1143 1143 def versiontuple(v=None, n=4):
1144 1144 """Parses a Mercurial version string into an N-tuple.
1145 1145
1146 1146 The version string to be parsed is specified with the ``v`` argument.
1147 1147 If it isn't defined, the current Mercurial version string will be parsed.
1148 1148
1149 1149 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1150 1150 returned values:
1151 1151
1152 1152 >>> v = b'3.6.1+190-df9b73d2d444'
1153 1153 >>> versiontuple(v, 2)
1154 1154 (3, 6)
1155 1155 >>> versiontuple(v, 3)
1156 1156 (3, 6, 1)
1157 1157 >>> versiontuple(v, 4)
1158 1158 (3, 6, 1, '190-df9b73d2d444')
1159 1159
1160 1160 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1161 1161 (3, 6, 1, '190-df9b73d2d444+20151118')
1162 1162
1163 1163 >>> v = b'3.6'
1164 1164 >>> versiontuple(v, 2)
1165 1165 (3, 6)
1166 1166 >>> versiontuple(v, 3)
1167 1167 (3, 6, None)
1168 1168 >>> versiontuple(v, 4)
1169 1169 (3, 6, None, None)
1170 1170
1171 1171 >>> v = b'3.9-rc'
1172 1172 >>> versiontuple(v, 2)
1173 1173 (3, 9)
1174 1174 >>> versiontuple(v, 3)
1175 1175 (3, 9, None)
1176 1176 >>> versiontuple(v, 4)
1177 1177 (3, 9, None, 'rc')
1178 1178
1179 1179 >>> v = b'3.9-rc+2-02a8fea4289b'
1180 1180 >>> versiontuple(v, 2)
1181 1181 (3, 9)
1182 1182 >>> versiontuple(v, 3)
1183 1183 (3, 9, None)
1184 1184 >>> versiontuple(v, 4)
1185 1185 (3, 9, None, 'rc+2-02a8fea4289b')
1186 1186
1187 1187 >>> versiontuple(b'4.6rc0')
1188 1188 (4, 6, None, 'rc0')
1189 1189 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1190 1190 (4, 6, None, 'rc0+12-425d55e54f98')
1191 1191 >>> versiontuple(b'.1.2.3')
1192 1192 (None, None, None, '.1.2.3')
1193 1193 >>> versiontuple(b'12.34..5')
1194 1194 (12, 34, None, '..5')
1195 1195 >>> versiontuple(b'1.2.3.4.5.6')
1196 1196 (1, 2, 3, '.4.5.6')
1197 1197 """
1198 1198 if not v:
1199 1199 v = version()
1200 1200 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1201 1201 if not m:
1202 1202 vparts, extra = b'', v
1203 1203 elif m.group(2):
1204 1204 vparts, extra = m.groups()
1205 1205 else:
1206 1206 vparts, extra = m.group(1), None
1207 1207
1208 1208 assert vparts is not None # help pytype
1209 1209
1210 1210 vints = []
1211 1211 for i in vparts.split(b'.'):
1212 1212 try:
1213 1213 vints.append(int(i))
1214 1214 except ValueError:
1215 1215 break
1216 1216 # (3, 6) -> (3, 6, None)
1217 1217 while len(vints) < 3:
1218 1218 vints.append(None)
1219 1219
1220 1220 if n == 2:
1221 1221 return (vints[0], vints[1])
1222 1222 if n == 3:
1223 1223 return (vints[0], vints[1], vints[2])
1224 1224 if n == 4:
1225 1225 return (vints[0], vints[1], vints[2], extra)
1226 1226
1227 1227
1228 1228 def cachefunc(func):
1229 1229 '''cache the result of function calls'''
1230 1230 # XXX doesn't handle keywords args
1231 1231 if func.__code__.co_argcount == 0:
1232 1232 listcache = []
1233 1233
1234 1234 def f():
1235 1235 if len(listcache) == 0:
1236 1236 listcache.append(func())
1237 1237 return listcache[0]
1238 1238
1239 1239 return f
1240 1240 cache = {}
1241 1241 if func.__code__.co_argcount == 1:
1242 1242 # we gain a small amount of time because
1243 1243 # we don't need to pack/unpack the list
1244 1244 def f(arg):
1245 1245 if arg not in cache:
1246 1246 cache[arg] = func(arg)
1247 1247 return cache[arg]
1248 1248
1249 1249 else:
1250 1250
1251 1251 def f(*args):
1252 1252 if args not in cache:
1253 1253 cache[args] = func(*args)
1254 1254 return cache[args]
1255 1255
1256 1256 return f
1257 1257
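# Example (editor's sketch, not part of the original module): memoizing a
# one-argument function; the wrapped body runs once per distinct argument.
#
#   calls = []
#   def double(x):
#       calls.append(x)
#       return x * 2
#   cached = cachefunc(double)
#   cached(2); cached(2)   # -> 4 both times; calls == [2]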
1258 1258
1259 1259 class cow(object):
1260 1260 """helper class to make copy-on-write easier
1261 1261
1262 1262 Call preparewrite before doing any writes.
1263 1263 """
1264 1264
1265 1265 def preparewrite(self):
1266 1266 """call this before writes, return self or a copied new object"""
1267 1267 if getattr(self, '_copied', 0):
1268 1268 self._copied -= 1
1269 1269 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1270 1270 return self.__class__(self) # pytype: disable=wrong-arg-count
1271 1271 return self
1272 1272
1273 1273 def copy(self):
1274 1274 """always do a cheap copy"""
1275 1275 self._copied = getattr(self, '_copied', 0) + 1
1276 1276 return self
1277 1277
1278 1278
1279 1279 class sortdict(collections.OrderedDict):
1280 1280 """a simple sorted dictionary
1281 1281
1282 1282 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1283 1283 >>> d2 = d1.copy()
1284 1284 >>> d2
1285 1285 sortdict([('a', 0), ('b', 1)])
1286 1286 >>> d2.update([(b'a', 2)])
1287 1287 >>> list(d2.keys()) # should still be in last-set order
1288 1288 ['b', 'a']
1289 1289 >>> d1.insert(1, b'a.5', 0.5)
1290 1290 >>> d1
1291 1291 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1292 1292 """
1293 1293
1294 1294 def __setitem__(self, key, value):
1295 1295 if key in self:
1296 1296 del self[key]
1297 1297 super(sortdict, self).__setitem__(key, value)
1298 1298
1299 1299 if pycompat.ispypy:
1300 1300 # __setitem__() isn't called as of PyPy 5.8.0
1301 1301 def update(self, src, **f):
1302 1302 if isinstance(src, dict):
1303 1303 src = pycompat.iteritems(src)
1304 1304 for k, v in src:
1305 1305 self[k] = v
1306 1306 for k in f:
1307 1307 self[k] = f[k]
1308 1308
1309 1309 def insert(self, position, key, value):
1310 1310 for (i, (k, v)) in enumerate(list(self.items())):
1311 1311 if i == position:
1312 1312 self[key] = value
1313 1313 if i >= position:
1314 1314 del self[k]
1315 1315 self[k] = v
1316 1316
1317 1317
1318 1318 class cowdict(cow, dict):
1319 1319 """copy-on-write dict
1320 1320
1321 1321 Be sure to call d = d.preparewrite() before writing to d.
1322 1322
1323 1323 >>> a = cowdict()
1324 1324 >>> a is a.preparewrite()
1325 1325 True
1326 1326 >>> b = a.copy()
1327 1327 >>> b is a
1328 1328 True
1329 1329 >>> c = b.copy()
1330 1330 >>> c is a
1331 1331 True
1332 1332 >>> a = a.preparewrite()
1333 1333 >>> b is a
1334 1334 False
1335 1335 >>> a is a.preparewrite()
1336 1336 True
1337 1337 >>> c = c.preparewrite()
1338 1338 >>> b is c
1339 1339 False
1340 1340 >>> b is b.preparewrite()
1341 1341 True
1342 1342 """
1343 1343
1344 1344
1345 1345 class cowsortdict(cow, sortdict):
1346 1346 """copy-on-write sortdict
1347 1347
1348 1348 Be sure to call d = d.preparewrite() before writing to d.
1349 1349 """
1350 1350
1351 1351
1352 1352 class transactional(object): # pytype: disable=ignored-metaclass
1353 1353 """Base class for making a transactional type into a context manager."""
1354 1354
1355 1355 __metaclass__ = abc.ABCMeta
1356 1356
1357 1357 @abc.abstractmethod
1358 1358 def close(self):
1359 1359 """Successfully closes the transaction."""
1360 1360
1361 1361 @abc.abstractmethod
1362 1362 def release(self):
1363 1363 """Marks the end of the transaction.
1364 1364
1365 1365 If the transaction has not been closed, it will be aborted.
1366 1366 """
1367 1367
1368 1368 def __enter__(self):
1369 1369 return self
1370 1370
1371 1371 def __exit__(self, exc_type, exc_val, exc_tb):
1372 1372 try:
1373 1373 if exc_type is None:
1374 1374 self.close()
1375 1375 finally:
1376 1376 self.release()
1377 1377
1378 1378
1379 1379 @contextlib.contextmanager
1380 1380 def acceptintervention(tr=None):
1381 1381 """A context manager that closes the transaction on InterventionRequired
1382 1382
1383 1383 If no transaction was provided, this simply runs the body and returns
1384 1384 """
1385 1385 if not tr:
1386 1386 yield
1387 1387 return
1388 1388 try:
1389 1389 yield
1390 1390 tr.close()
1391 1391 except error.InterventionRequired:
1392 1392 tr.close()
1393 1393 raise
1394 1394 finally:
1395 1395 tr.release()
1396 1396
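# Example (editor's sketch, not part of the original module): 'tr' and
# 'do_work' are hypothetical; InterventionRequired closes the transaction
# instead of aborting it, then propagates to the caller.
#
#   with acceptintervention(tr):
#       do_work()   # tr.close() also runs if this succeeds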
1397 1397
1398 1398 @contextlib.contextmanager
1399 1399 def nullcontextmanager(enter_result=None):
1400 1400 yield enter_result
1401 1401
1402 1402
1403 1403 class _lrucachenode(object):
1404 1404 """A node in a doubly linked list.
1405 1405
1406 1406 Holds a reference to nodes on either side as well as a key-value
1407 1407 pair for the dictionary entry.
1408 1408 """
1409 1409
1410 1410 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1411 1411
1412 1412 def __init__(self):
1413 1413 self.next = self
1414 1414 self.prev = self
1415 1415
1416 1416 self.key = _notset
1417 1417 self.value = None
1418 1418 self.cost = 0
1419 1419
1420 1420 def markempty(self):
1421 1421 """Mark the node as emptied."""
1422 1422 self.key = _notset
1423 1423 self.value = None
1424 1424 self.cost = 0
1425 1425
1426 1426
1427 1427 class lrucachedict(object):
1428 1428 """Dict that caches most recent accesses and sets.
1429 1429
1430 1430 The dict consists of an actual backing dict - indexed by original
1431 1431 key - and a doubly linked circular list defining the order of entries in
1432 1432 the cache.
1433 1433
1434 1434 The head node is the newest entry in the cache. If the cache is full,
1435 1435 we recycle head.prev and make it the new head. Cache accesses result in
1436 1436 the node being moved to before the existing head and being marked as the
1437 1437 new head node.
1438 1438
1439 1439 Items in the cache can be inserted with an optional "cost" value. This is
1440 1440 simply an integer that is specified by the caller. The cache can be queried
1441 1441 for the total cost of all items presently in the cache.
1442 1442
1443 1443 The cache can also define a maximum cost. If a cache insertion would
1444 1444 cause the total cost of the cache to go beyond the maximum cost limit,
1445 1445 nodes will be evicted to make room for the new node. This can be used
1446 1446 to e.g. set a max memory limit and associate an estimated bytes size
1447 1447 cost to each item in the cache. By default, no maximum cost is enforced.
1448 1448 """
1449 1449
1450 1450 def __init__(self, max, maxcost=0):
1451 1451 self._cache = {}
1452 1452
1453 1453 self._head = _lrucachenode()
1454 1454 self._size = 1
1455 1455 self.capacity = max
1456 1456 self.totalcost = 0
1457 1457 self.maxcost = maxcost
1458 1458
1459 1459 def __len__(self):
1460 1460 return len(self._cache)
1461 1461
1462 1462 def __contains__(self, k):
1463 1463 return k in self._cache
1464 1464
1465 1465 def __iter__(self):
1466 1466 # We don't have to iterate in cache order, but why not.
1467 1467 n = self._head
1468 1468 for i in range(len(self._cache)):
1469 1469 yield n.key
1470 1470 n = n.next
1471 1471
1472 1472 def __getitem__(self, k):
1473 1473 node = self._cache[k]
1474 1474 self._movetohead(node)
1475 1475 return node.value
1476 1476
1477 1477 def insert(self, k, v, cost=0):
1478 1478 """Insert a new item in the cache with optional cost value."""
1479 1479 node = self._cache.get(k)
1480 1480 # Replace existing value and mark as newest.
1481 1481 if node is not None:
1482 1482 self.totalcost -= node.cost
1483 1483 node.value = v
1484 1484 node.cost = cost
1485 1485 self.totalcost += cost
1486 1486 self._movetohead(node)
1487 1487
1488 1488 if self.maxcost:
1489 1489 self._enforcecostlimit()
1490 1490
1491 1491 return
1492 1492
1493 1493 if self._size < self.capacity:
1494 1494 node = self._addcapacity()
1495 1495 else:
1496 1496 # Grab the last/oldest item.
1497 1497 node = self._head.prev
1498 1498
1499 1499 # At capacity. Kill the old entry.
1500 1500 if node.key is not _notset:
1501 1501 self.totalcost -= node.cost
1502 1502 del self._cache[node.key]
1503 1503
1504 1504 node.key = k
1505 1505 node.value = v
1506 1506 node.cost = cost
1507 1507 self.totalcost += cost
1508 1508 self._cache[k] = node
1509 1509 # And mark it as newest entry. No need to adjust order since it
1510 1510 # is already self._head.prev.
1511 1511 self._head = node
1512 1512
1513 1513 if self.maxcost:
1514 1514 self._enforcecostlimit()
1515 1515
1516 1516 def __setitem__(self, k, v):
1517 1517 self.insert(k, v)
1518 1518
1519 1519 def __delitem__(self, k):
1520 1520 self.pop(k)
1521 1521
1522 1522 def pop(self, k, default=_notset):
1523 1523 try:
1524 1524 node = self._cache.pop(k)
1525 1525 except KeyError:
1526 1526 if default is _notset:
1527 1527 raise
1528 1528 return default
1529 1529
1530 1530 assert node is not None # help pytype
1531 1531 value = node.value
1532 1532 self.totalcost -= node.cost
1533 1533 node.markempty()
1534 1534
1535 1535 # Temporarily mark as newest item before re-adjusting head to make
1536 1536 # this node the oldest item.
1537 1537 self._movetohead(node)
1538 1538 self._head = node.next
1539 1539
1540 1540 return value
1541 1541
1542 1542 # Additional dict methods.
1543 1543
1544 1544 def get(self, k, default=None):
1545 1545 try:
1546 1546 return self.__getitem__(k)
1547 1547 except KeyError:
1548 1548 return default
1549 1549
1550 1550 def peek(self, k, default=_notset):
1551 1551 """Get the specified item without moving it to the head
1552 1552
1553 1553 Unlike get(), this doesn't mutate the internal state. But be aware
1554 1554 that it doesn't mean peek() is thread safe.
1555 1555 """
1556 1556 try:
1557 1557 node = self._cache[k]
1558 1558 assert node is not None # help pytype
1559 1559 return node.value
1560 1560 except KeyError:
1561 1561 if default is _notset:
1562 1562 raise
1563 1563 return default
1564 1564
1565 1565 def clear(self):
1566 1566 n = self._head
1567 1567 while n.key is not _notset:
1568 1568 self.totalcost -= n.cost
1569 1569 n.markempty()
1570 1570 n = n.next
1571 1571
1572 1572 self._cache.clear()
1573 1573
1574 1574 def copy(self, capacity=None, maxcost=0):
1575 1575 """Create a new cache as a copy of the current one.
1576 1576
1577 1577 By default, the new cache has the same capacity as the existing one.
1578 1578 But, the cache capacity can be changed as part of performing the
1579 1579 copy.
1580 1580
1581 1581 Items in the copy have an insertion/access order matching this
1582 1582 instance.
1583 1583 """
1584 1584
1585 1585 capacity = capacity or self.capacity
1586 1586 maxcost = maxcost or self.maxcost
1587 1587 result = lrucachedict(capacity, maxcost=maxcost)
1588 1588
1589 1589 # We copy entries by iterating in oldest-to-newest order so the copy
1590 1590 # has the correct ordering.
1591 1591
1592 1592 # Find the first non-empty entry.
1593 1593 n = self._head.prev
1594 1594 while n.key is _notset and n is not self._head:
1595 1595 n = n.prev
1596 1596
1597 1597 # We could potentially skip the first N items when decreasing capacity.
1598 1598 # But let's keep it simple unless it is a performance problem.
1599 1599 for i in range(len(self._cache)):
1600 1600 result.insert(n.key, n.value, cost=n.cost)
1601 1601 n = n.prev
1602 1602
1603 1603 return result
1604 1604
1605 1605 def popoldest(self):
1606 1606 """Remove the oldest item from the cache.
1607 1607
1608 1608 Returns the (key, value) describing the removed cache entry.
1609 1609 """
1610 1610 if not self._cache:
1611 1611 return
1612 1612
1613 1613 # Walk the linked list backwards starting at tail node until we hit
1614 1614 # a non-empty node.
1615 1615 n = self._head.prev
1616 1616
1617 1617 assert n is not None # help pytype
1618 1618
1619 1619 while n.key is _notset:
1620 1620 n = n.prev
1621 1621
1622 1622 assert n is not None # help pytype
1623 1623
1624 1624 key, value = n.key, n.value
1625 1625
1626 1626 # And remove it from the cache and mark it as empty.
1627 1627 del self._cache[n.key]
1628 1628 self.totalcost -= n.cost
1629 1629 n.markempty()
1630 1630
1631 1631 return key, value
1632 1632
1633 1633 def _movetohead(self, node):
1634 1634 """Mark a node as the newest, making it the new head.
1635 1635
1636 1636 When a node is accessed, it becomes the freshest entry in the LRU
1637 1637 list, which is denoted by self._head.
1638 1638
1639 1639 Visually, let's make ``N`` the new head node (* denotes head):
1640 1640
1641 1641 previous/oldest <-> head <-> next/next newest
1642 1642
1643 1643 ----<->--- A* ---<->-----
1644 1644 | |
1645 1645 E <-> D <-> N <-> C <-> B
1646 1646
1647 1647 To:
1648 1648
1649 1649 ----<->--- N* ---<->-----
1650 1650 | |
1651 1651 E <-> D <-> C <-> B <-> A
1652 1652
1653 1653 This requires the following moves:
1654 1654
1655 1655 C.next = D (node.prev.next = node.next)
1656 1656 D.prev = C (node.next.prev = node.prev)
1657 1657 E.next = N (head.prev.next = node)
1658 1658 N.prev = E (node.prev = head.prev)
1659 1659 N.next = A (node.next = head)
1660 1660 A.prev = N (head.prev = node)
1661 1661 """
1662 1662 head = self._head
1663 1663 # C.next = D
1664 1664 node.prev.next = node.next
1665 1665 # D.prev = C
1666 1666 node.next.prev = node.prev
1667 1667 # N.prev = E
1668 1668 node.prev = head.prev
1669 1669 # N.next = A
1670 1670 # It is tempting to do just "head" here, however if node is
1671 1671 # adjacent to head, this will do bad things.
1672 1672 node.next = head.prev.next
1673 1673 # E.next = N
1674 1674 node.next.prev = node
1675 1675 # A.prev = N
1676 1676 node.prev.next = node
1677 1677
1678 1678 self._head = node
1679 1679
1680 1680 def _addcapacity(self):
1681 1681 """Add a node to the circular linked list.
1682 1682
1683 1683 The new node is inserted before the head node.
1684 1684 """
1685 1685 head = self._head
1686 1686 node = _lrucachenode()
1687 1687 head.prev.next = node
1688 1688 node.prev = head.prev
1689 1689 node.next = head
1690 1690 head.prev = node
1691 1691 self._size += 1
1692 1692 return node
1693 1693
1694 1694 def _enforcecostlimit(self):
1695 1695 # This should run after an insertion. It should only be called if total
1696 1696 # cost limits are being enforced.
1697 1697 # The most recently inserted node is never evicted.
1698 1698 if len(self) <= 1 or self.totalcost <= self.maxcost:
1699 1699 return
1700 1700
1701 1701 # This is logically equivalent to calling popoldest() until we
1702 1702 # free up enough cost. We don't do that since popoldest() needs
1703 1703 # to walk the linked list and doing this in a loop would be
1704 1704 # quadratic. So we find the first non-empty node and then
1705 1705 # walk nodes until we free up enough capacity.
1706 1706 #
1707 1707 # If we only removed the minimum number of nodes to free enough
1708 1708 # cost at insert time, chances are high that the next insert would
1709 1709 # also require pruning. This would effectively constitute quadratic
1710 1710 # behavior for insert-heavy workloads. To mitigate this, we set a
1711 1711 # target cost that is a percentage of the max cost. This will tend
1712 1712 # to free more nodes when the high water mark is reached, which
1713 1713 # lowers the chances of needing to prune on the subsequent insert.
1714 1714 targetcost = int(self.maxcost * 0.75)
1715 1715
1716 1716 n = self._head.prev
1717 1717 while n.key is _notset:
1718 1718 n = n.prev
1719 1719
1720 1720 while len(self) > 1 and self.totalcost > targetcost:
1721 1721 del self._cache[n.key]
1722 1722 self.totalcost -= n.cost
1723 1723 n.markempty()
1724 1724 n = n.prev
1725 1725
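# Example (editor's sketch, not part of the original module): cost-bounded
# eviction in action.
#
#   d = lrucachedict(4, maxcost=100)
#   d.insert(b'a', b'v', cost=60)
#   d.insert(b'b', b'v', cost=60)   # totalcost 120 > 100: b'a' is evicted
#   b'a' in d   # -> False
#   d[b'b']     # -> b'v'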
1726 1726
1727 1727 def lrucachefunc(func):
1728 1728 '''cache most recent results of function calls'''
1729 1729 cache = {}
1730 1730 order = collections.deque()
1731 1731 if func.__code__.co_argcount == 1:
1732 1732
1733 1733 def f(arg):
1734 1734 if arg not in cache:
1735 1735 if len(cache) > 20:
1736 1736 del cache[order.popleft()]
1737 1737 cache[arg] = func(arg)
1738 1738 else:
1739 1739 order.remove(arg)
1740 1740 order.append(arg)
1741 1741 return cache[arg]
1742 1742
1743 1743 else:
1744 1744
1745 1745 def f(*args):
1746 1746 if args not in cache:
1747 1747 if len(cache) > 20:
1748 1748 del cache[order.popleft()]
1749 1749 cache[args] = func(*args)
1750 1750 else:
1751 1751 order.remove(args)
1752 1752 order.append(args)
1753 1753 return cache[args]
1754 1754
1755 1755 return f
1756 1756
1757 1757
1758 1758 class propertycache(object):
1759 1759 def __init__(self, func):
1760 1760 self.func = func
1761 1761 self.name = func.__name__
1762 1762
1763 1763 def __get__(self, obj, type=None):
1764 1764 result = self.func(obj)
1765 1765 self.cachevalue(obj, result)
1766 1766 return result
1767 1767
1768 1768 def cachevalue(self, obj, value):
1769 1769 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1770 1770 obj.__dict__[self.name] = value
1771 1771
1772 1772
1773 1773 def clearcachedproperty(obj, prop):
1774 1774 '''clear a cached property value, if one has been set'''
1775 1775 prop = pycompat.sysstr(prop)
1776 1776 if prop in obj.__dict__:
1777 1777 del obj.__dict__[prop]
1778 1778
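# Example (editor's sketch, not part of the original module): the decorated
# method runs once, after which the value is served from the instance
# __dict__; 'expensivecomputation' is a hypothetical helper.
#
#   class thing(object):
#       @propertycache
#       def answer(self):
#           return expensivecomputation()
#   t = thing()
#   t.answer   # computes and caches
#   t.answer   # plain attribute lookup, no recomputation
#   clearcachedproperty(t, b'answer')   # next access recomputes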
1779 1779
1780 1780 def increasingchunks(source, min=1024, max=65536):
1781 1781 """return no less than min bytes per chunk while data remains,
1782 1782 doubling min after each chunk until it reaches max"""
1783 1783
1784 1784 def log2(x):
1785 1785 if not x:
1786 1786 return 0
1787 1787 i = 0
1788 1788 while x:
1789 1789 x >>= 1
1790 1790 i += 1
1791 1791 return i - 1
1792 1792
1793 1793 buf = []
1794 1794 blen = 0
1795 1795 for chunk in source:
1796 1796 buf.append(chunk)
1797 1797 blen += len(chunk)
1798 1798 if blen >= min:
1799 1799 if min < max:
1800 1800 min = min << 1
1801 1801 nmin = 1 << log2(blen)
1802 1802 if nmin > min:
1803 1803 min = nmin
1804 1804 if min > max:
1805 1805 min = max
1806 1806 yield b''.join(buf)
1807 1807 blen = 0
1808 1808 buf = []
1809 1809 if buf:
1810 1810 yield b''.join(buf)
1811 1811
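# Example (editor's sketch, not part of the original module): with 500-byte
# input chunks, the yielded sizes grow as 'min' doubles toward 'max'.
#
#   gen = increasingchunks(iter([b'x' * 500] * 10), min=1024, max=4096)
#   [len(c) for c in gen]   # -> [1500, 2500, 1000]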
1812 1812
1813 1813 def always(fn):
1814 1814 return True
1815 1815
1816 1816
1817 1817 def never(fn):
1818 1818 return False
1819 1819
1820 1820
1821 1821 def nogc(func):
1822 1822 """disable garbage collector
1823 1823
1824 1824 Python's garbage collector triggers a GC each time a certain number of
1825 1825 container objects (the number being defined by gc.get_threshold()) are
1826 1826 allocated even when marked not to be tracked by the collector. Tracking has
1827 1827 no effect on when GCs are triggered, only on what objects the GC looks
1828 1828 into. As a workaround, disable GC while building complex (huge)
1829 1829 containers.
1830 1830
1831 1831 This garbage collector issue has been fixed in 2.7, but it still affects
1832 1832 CPython's performance.
1833 1833 """
1834 1834
1835 1835 def wrapper(*args, **kwargs):
1836 1836 gcenabled = gc.isenabled()
1837 1837 gc.disable()
1838 1838 try:
1839 1839 return func(*args, **kwargs)
1840 1840 finally:
1841 1841 if gcenabled:
1842 1842 gc.enable()
1843 1843
1844 1844 return wrapper
1845 1845
1846 1846
1847 1847 if pycompat.ispypy:
1848 1848 # PyPy runs slower with gc disabled
1849 1849 nogc = lambda x: x
1850 1850
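# Typical use is as a decorator around code that builds huge containers
# (the function body here is only illustrative):
#
#     @nogc
#     def buildmap(pairs):
#         return dict(pairs)  # GC stays disabled while the dict is built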
1851 1851
1852 1852 def pathto(root, n1, n2):
1853 1853 # type: (bytes, bytes, bytes) -> bytes
1854 1854 """return the relative path from one place to another.
1855 1855 root should use os.sep to separate directories
1856 1856 n1 should use os.sep to separate directories
1857 1857 n2 should use "/" to separate directories
1858 1858 returns an os.sep-separated path.
1859 1859
1860 1860 If n1 is a relative path, it's assumed it's
1861 1861 relative to root.
1862 1862 n2 should always be relative to root.
1863 1863 """
1864 1864 if not n1:
1865 1865 return localpath(n2)
1866 1866 if os.path.isabs(n1):
1867 1867 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1868 1868 return os.path.join(root, localpath(n2))
1869 1869 n2 = b'/'.join((pconvert(root), n2))
1870 1870 a, b = splitpath(n1), n2.split(b'/')
1871 1871 a.reverse()
1872 1872 b.reverse()
1873 1873 while a and b and a[-1] == b[-1]:
1874 1874 a.pop()
1875 1875 b.pop()
1876 1876 b.reverse()
1877 1877 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1878 1878
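# For example, on a POSIX system (where os.sep is b'/'):
#
#     pathto(b'/repo', b'a/b', b'c/d')  # -> b'../../c/d'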
1879 1879
1880 1880 def checksignature(func, depth=1):
1881 1881 '''wrap a function with code to check for calling errors'''
1882 1882
1883 1883 def check(*args, **kwargs):
1884 1884 try:
1885 1885 return func(*args, **kwargs)
1886 1886 except TypeError:
1887 1887 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1888 1888 raise error.SignatureError
1889 1889 raise
1890 1890
1891 1891 return check
1892 1892
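# A small sketch: the wrapper turns an argument-count TypeError raised at
# the call boundary into error.SignatureError:
#
#     add = checksignature(lambda a, b: a + b)
#     add(1, 2)  # -> 3
#     add(1)     # raises error.SignatureError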
1893 1893
1894 1894 # a whitelist of known filesystems where hardlinks work reliably
1895 1895 _hardlinkfswhitelist = {
1896 1896 b'apfs',
1897 1897 b'btrfs',
1898 1898 b'ext2',
1899 1899 b'ext3',
1900 1900 b'ext4',
1901 1901 b'hfs',
1902 1902 b'jfs',
1903 1903 b'NTFS',
1904 1904 b'reiserfs',
1905 1905 b'tmpfs',
1906 1906 b'ufs',
1907 1907 b'xfs',
1908 1908 b'zfs',
1909 1909 }
1910 1910
1911 1911
1912 1912 def copyfile(
1913 1913 src, dest, hardlink=False, copystat=False, checkambig=False, nb_bytes=None
1914 1914 ):
1915 1915 """copy a file, preserving mode and optionally other stat info like
1916 1916 atime/mtime
1917 1917
1918 1918 checkambig argument is used with filestat, and is useful only if
1919 1919 destination file is guarded by any lock (e.g. repo.lock or
1920 1920 repo.wlock).
1921 1921
1922 1922 copystat and checkambig should be exclusive.
1923 1923
1924 1924 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1925 1925 """
1926 1926 assert not (copystat and checkambig)
1927 1927 oldstat = None
1928 1928 if os.path.lexists(dest):
1929 1929 if checkambig:
1930 1930 oldstat = checkambig and filestat.frompath(dest)
1931 1931 unlink(dest)
1932 1932 if hardlink:
1933 1933 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1934 1934 # unless we are confident that dest is on a whitelisted filesystem.
1935 1935 try:
1936 1936 fstype = getfstype(os.path.dirname(dest))
1937 1937 except OSError:
1938 1938 fstype = None
1939 1939 if fstype not in _hardlinkfswhitelist:
1940 1940 hardlink = False
1941 1941 if hardlink:
1942 1942 try:
1943 1943 oslink(src, dest)
1944 1944 if nb_bytes is not None:
1945 1945 m = "the `nb_bytes` argument is incompatible with `hardlink`"
1946 1946 raise error.ProgrammingError(m)
1947 1947 return
1948 1948 except (IOError, OSError):
1949 1949 pass # fall back to normal copy
1950 1950 if os.path.islink(src):
1951 1951 os.symlink(os.readlink(src), dest)
1952 1952 # copytime is ignored for symlinks, but in general copytime isn't needed
1953 1953 # for them anyway
1954 1954 if nb_bytes is not None:
1955 1955 m = "cannot use `nb_bytes` on a symlink"
1956 1956 raise error.ProgrammingError(m)
1957 1957 else:
1958 1958 try:
1959 1959 shutil.copyfile(src, dest)
1960 1960 if copystat:
1961 1961 # copystat also copies mode
1962 1962 shutil.copystat(src, dest)
1963 1963 else:
1964 1964 shutil.copymode(src, dest)
1965 1965 if oldstat and oldstat.stat:
1966 1966 newstat = filestat.frompath(dest)
1967 1967 if newstat.isambig(oldstat):
1968 1968 # stat of copied file is ambiguous to original one
1969 1969 advanced = (
1970 1970 oldstat.stat[stat.ST_MTIME] + 1
1971 1971 ) & 0x7FFFFFFF
1972 1972 os.utime(dest, (advanced, advanced))
1973 1973 # We could do something smarter using `copy_file_range` call or similar
1974 1974 if nb_bytes is not None:
1975 1975 with open(dest, mode='r+') as f:
1976 1976 f.truncate(nb_bytes)
1977 1977 except shutil.Error as inst:
1978 1978 raise error.Abort(stringutil.forcebytestr(inst))
1979 1979
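# Typical invocations (paths are illustrative):
#
#     copyfile(b'a.txt', b'b.txt')                 # plain copy, mode preserved
#     copyfile(b'a.txt', b'b.txt', hardlink=True)  # hardlink on whitelisted
#                                                  # filesystems, else a copy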
1980 1980
1981 1981 def copyfiles(src, dst, hardlink=None, progress=None):
1982 1982 """Copy a directory tree using hardlinks if possible."""
1983 1983 num = 0
1984 1984
1985 1985 def settopic():
1986 1986 if progress:
1987 1987 progress.topic = _(b'linking') if hardlink else _(b'copying')
1988 1988
1989 1989 if os.path.isdir(src):
1990 1990 if hardlink is None:
1991 1991 hardlink = (
1992 1992 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1993 1993 )
1994 1994 settopic()
1995 1995 os.mkdir(dst)
1996 1996 for name, kind in listdir(src):
1997 1997 srcname = os.path.join(src, name)
1998 1998 dstname = os.path.join(dst, name)
1999 1999 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2000 2000 num += n
2001 2001 else:
2002 2002 if hardlink is None:
2003 2003 hardlink = (
2004 2004 os.stat(os.path.dirname(src)).st_dev
2005 2005 == os.stat(os.path.dirname(dst)).st_dev
2006 2006 )
2007 2007 settopic()
2008 2008
2009 2009 if hardlink:
2010 2010 try:
2011 2011 oslink(src, dst)
2012 except (IOError, OSError) as exc:
2013 if exc.errno != errno.EEXIST:
2013 2014 hardlink = False
2015 # XXX maybe try to relink if the file exists?
2014 2016 shutil.copy(src, dst)
2015 2017 else:
2016 2018 shutil.copy(src, dst)
2017 2019 num += 1
2018 2020 if progress:
2019 2021 progress.increment()
2020 2022
2021 2023 return hardlink, num
2022 2024
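# A sketch of a tree copy (paths are illustrative); hardlinking is chosen
# automatically when source and destination live on the same device:
#
#     hardlinked, n = copyfiles(b'srcdir', b'dstdir')
#     # n is the number of files copied or linked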
2023 2025
2024 2026 _winreservednames = {
2025 2027 b'con',
2026 2028 b'prn',
2027 2029 b'aux',
2028 2030 b'nul',
2029 2031 b'com1',
2030 2032 b'com2',
2031 2033 b'com3',
2032 2034 b'com4',
2033 2035 b'com5',
2034 2036 b'com6',
2035 2037 b'com7',
2036 2038 b'com8',
2037 2039 b'com9',
2038 2040 b'lpt1',
2039 2041 b'lpt2',
2040 2042 b'lpt3',
2041 2043 b'lpt4',
2042 2044 b'lpt5',
2043 2045 b'lpt6',
2044 2046 b'lpt7',
2045 2047 b'lpt8',
2046 2048 b'lpt9',
2047 2049 }
2048 2050 _winreservedchars = b':*?"<>|'
2049 2051
2050 2052
2051 2053 def checkwinfilename(path):
2052 2054 # type: (bytes) -> Optional[bytes]
2053 2055 r"""Check that the base-relative path is a valid filename on Windows.
2054 2056 Returns None if the path is ok, or a UI string describing the problem.
2055 2057
2056 2058 >>> checkwinfilename(b"just/a/normal/path")
2057 2059 >>> checkwinfilename(b"foo/bar/con.xml")
2058 2060 "filename contains 'con', which is reserved on Windows"
2059 2061 >>> checkwinfilename(b"foo/con.xml/bar")
2060 2062 "filename contains 'con', which is reserved on Windows"
2061 2063 >>> checkwinfilename(b"foo/bar/xml.con")
2062 2064 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2063 2065 "filename contains 'AUX', which is reserved on Windows"
2064 2066 >>> checkwinfilename(b"foo/bar/bla:.txt")
2065 2067 "filename contains ':', which is reserved on Windows"
2066 2068 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2067 2069 "filename contains '\\x07', which is invalid on Windows"
2068 2070 >>> checkwinfilename(b"foo/bar/bla ")
2069 2071 "filename ends with ' ', which is not allowed on Windows"
2070 2072 >>> checkwinfilename(b"../bar")
2071 2073 >>> checkwinfilename(b"foo\\")
2072 2074 "filename ends with '\\', which is invalid on Windows"
2073 2075 >>> checkwinfilename(b"foo\\/bar")
2074 2076 "directory name ends with '\\', which is invalid on Windows"
2075 2077 """
2076 2078 if path.endswith(b'\\'):
2077 2079 return _(b"filename ends with '\\', which is invalid on Windows")
2078 2080 if b'\\/' in path:
2079 2081 return _(b"directory name ends with '\\', which is invalid on Windows")
2080 2082 for n in path.replace(b'\\', b'/').split(b'/'):
2081 2083 if not n:
2082 2084 continue
2083 2085 for c in _filenamebytestr(n):
2084 2086 if c in _winreservedchars:
2085 2087 return (
2086 2088 _(
2087 2089 b"filename contains '%s', which is reserved "
2088 2090 b"on Windows"
2089 2091 )
2090 2092 % c
2091 2093 )
2092 2094 if ord(c) <= 31:
2093 2095 return _(
2094 2096 b"filename contains '%s', which is invalid on Windows"
2095 2097 ) % stringutil.escapestr(c)
2096 2098 base = n.split(b'.')[0]
2097 2099 if base and base.lower() in _winreservednames:
2098 2100 return (
2099 2101 _(b"filename contains '%s', which is reserved on Windows")
2100 2102 % base
2101 2103 )
2102 2104 t = n[-1:]
2103 2105 if t in b'. ' and n not in b'..':
2104 2106 return (
2105 2107 _(
2106 2108 b"filename ends with '%s', which is not allowed "
2107 2109 b"on Windows"
2108 2110 )
2109 2111 % t
2110 2112 )
2111 2113
2112 2114
2113 2115 timer = getattr(time, "perf_counter", None)
2114 2116
2115 2117 if pycompat.iswindows:
2116 2118 checkosfilename = checkwinfilename
2117 2119 if not timer:
2118 2120 timer = time.clock
2119 2121 else:
2120 2122 # mercurial.windows doesn't have platform.checkosfilename
2121 2123 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2122 2124 if not timer:
2123 2125 timer = time.time
2124 2126
2125 2127
2126 2128 def makelock(info, pathname):
2127 2129 """Create a lock file atomically if possible
2128 2130
2129 2131 This may leave a stale lock file if symlink isn't supported and signal
2130 2132 interrupt is enabled.
2131 2133 """
2132 2134 try:
2133 2135 return os.symlink(info, pathname)
2134 2136 except OSError as why:
2135 2137 if why.errno == errno.EEXIST:
2136 2138 raise
2137 2139 except AttributeError: # no symlink in os
2138 2140 pass
2139 2141
2140 2142 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2141 2143 ld = os.open(pathname, flags)
2142 2144 os.write(ld, info)
2143 2145 os.close(ld)
2144 2146
2145 2147
2146 2148 def readlock(pathname):
2147 2149 # type: (bytes) -> bytes
2148 2150 try:
2149 2151 return readlink(pathname)
2150 2152 except OSError as why:
2151 2153 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2152 2154 raise
2153 2155 except AttributeError: # no symlink in os
2154 2156 pass
2155 2157 with posixfile(pathname, b'rb') as fp:
2156 2158 return fp.read()
2157 2159
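# The two functions pair up; the lock content round-trips whether the
# platform used a symlink or a regular file (values are illustrative):
#
#     makelock(b'myhost:1234', b'somelock')
#     readlock(b'somelock')  # -> b'myhost:1234'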
2158 2160
2159 2161 def fstat(fp):
2160 2162 '''stat file object that may not have fileno method.'''
2161 2163 try:
2162 2164 return os.fstat(fp.fileno())
2163 2165 except AttributeError:
2164 2166 return os.stat(fp.name)
2165 2167
2166 2168
2167 2169 # File system features
2168 2170
2169 2171
2170 2172 def fscasesensitive(path):
2171 2173 # type: (bytes) -> bool
2172 2174 """
2173 2175 Return true if the given path is on a case-sensitive filesystem
2174 2176
2175 2177 Requires a path (like /foo/.hg) ending with a foldable final
2176 2178 directory component.
2177 2179 """
2178 2180 s1 = os.lstat(path)
2179 2181 d, b = os.path.split(path)
2180 2182 b2 = b.upper()
2181 2183 if b == b2:
2182 2184 b2 = b.lower()
2183 2185 if b == b2:
2184 2186 return True # no evidence against case sensitivity
2185 2187 p2 = os.path.join(d, b2)
2186 2188 try:
2187 2189 s2 = os.lstat(p2)
2188 2190 if s2 == s1:
2189 2191 return False
2190 2192 return True
2191 2193 except OSError:
2192 2194 return True
2193 2195
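# For example (the path is illustrative and must exist):
#
#     fscasesensitive(b'/repo/.hg')  # False on typical macOS/Windows mounts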
2194 2196
2195 2197 _re2_input = lambda x: x
2196 2198 try:
2197 2199 import re2 # pytype: disable=import-error
2198 2200
2199 2201 _re2 = None
2200 2202 except ImportError:
2201 2203 _re2 = False
2202 2204
2203 2205
2204 2206 class _re(object):
2205 2207 def _checkre2(self):
2206 2208 global _re2
2207 2209 global _re2_input
2208 2210
2209 2211 check_pattern = br'\[([^\[]+)\]'
2210 2212 check_input = b'[ui]'
2211 2213 try:
2212 2214 # check if match works, see issue3964
2213 2215 _re2 = bool(re2.match(check_pattern, check_input))
2214 2216 except ImportError:
2215 2217 _re2 = False
2216 2218 except TypeError:
2217 2219 # the `pyre-2` project provides a re2 module that accepts bytes
2218 2220 # the `fb-re2` project provides a re2 module that accepts sysstr
2219 2221 check_pattern = pycompat.sysstr(check_pattern)
2220 2222 check_input = pycompat.sysstr(check_input)
2221 2223 _re2 = bool(re2.match(check_pattern, check_input))
2222 2224 _re2_input = pycompat.sysstr
2223 2225
2224 2226 def compile(self, pat, flags=0):
2225 2227 """Compile a regular expression, using re2 if possible
2226 2228
2227 2229 For best performance, use only re2-compatible regexp features. The
2228 2230 only flags from the re module that are re2-compatible are
2229 2231 IGNORECASE and MULTILINE."""
2230 2232 if _re2 is None:
2231 2233 self._checkre2()
2232 2234 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2233 2235 if flags & remod.IGNORECASE:
2234 2236 pat = b'(?i)' + pat
2235 2237 if flags & remod.MULTILINE:
2236 2238 pat = b'(?m)' + pat
2237 2239 try:
2238 2240 return re2.compile(_re2_input(pat))
2239 2241 except re2.error:
2240 2242 pass
2241 2243 return remod.compile(pat, flags)
2242 2244
2243 2245 @propertycache
2244 2246 def escape(self):
2245 2247 """Return the version of escape corresponding to self.compile.
2246 2248
2247 2249 This is imperfect because whether re2 or re is used for a particular
2248 2250 function depends on the flags, etc, but it's the best we can do.
2249 2251 """
2250 2252 global _re2
2251 2253 if _re2 is None:
2252 2254 self._checkre2()
2253 2255 if _re2:
2254 2256 return re2.escape
2255 2257 else:
2256 2258 return remod.escape
2257 2259
2258 2260
2259 2261 re = _re()
2260 2262
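# The instance above is used much like the stdlib re module, e.g.:
#
#     pat = re.compile(br'bookmarks?', remod.IGNORECASE)
#     bool(pat.match(b'Bookmark'))  # -> True, via re2 when it is available
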
2261 2263 _fspathcache = {}
2262 2264
2263 2265
2264 2266 def fspath(name, root):
2265 2267 # type: (bytes, bytes) -> bytes
2266 2268 """Get name in the case stored in the filesystem
2267 2269
2268 2270 The name should be relative to root, and be normcase-ed for efficiency.
2269 2271
2270 2272 Note that this function is unnecessary, and should not be
2271 2273 called, for case-sensitive filesystems (simply because it's expensive).
2272 2274
2273 2275 The root should be normcase-ed, too.
2274 2276 """
2275 2277
2276 2278 def _makefspathcacheentry(dir):
2277 2279 return {normcase(n): n for n in os.listdir(dir)}
2278 2280
2279 2281 seps = pycompat.ossep
2280 2282 if pycompat.osaltsep:
2281 2283 seps = seps + pycompat.osaltsep
2282 2284 # Protect backslashes. This gets silly very quickly.
2283 2285 seps = seps.replace(b'\\', b'\\\\')
2284 2286 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2285 2287 dir = os.path.normpath(root)
2286 2288 result = []
2287 2289 for part, sep in pattern.findall(name):
2288 2290 if sep:
2289 2291 result.append(sep)
2290 2292 continue
2291 2293
2292 2294 if dir not in _fspathcache:
2293 2295 _fspathcache[dir] = _makefspathcacheentry(dir)
2294 2296 contents = _fspathcache[dir]
2295 2297
2296 2298 found = contents.get(part)
2297 2299 if not found:
2298 2300 # retry "once per directory" per "dirstate.walk", which
2299 2301 # may take place for each patch of "hg qpush", for example
2300 2302 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2301 2303 found = contents.get(part)
2302 2304
2303 2305 result.append(found or part)
2304 2306 dir = os.path.join(dir, part)
2305 2307
2306 2308 return b''.join(result)
2307 2309
2308 2310
2309 2311 def checknlink(testfile):
2310 2312 # type: (bytes) -> bool
2311 2313 '''check whether hardlink count reporting works properly'''
2312 2314
2313 2315 # testfile may be open, so we need a separate file for checking to
2314 2316 # work around issue2543 (or testfile may get lost on Samba shares)
2315 2317 f1, f2, fp = None, None, None
2316 2318 try:
2317 2319 fd, f1 = pycompat.mkstemp(
2318 2320 prefix=b'.%s-' % os.path.basename(testfile),
2319 2321 suffix=b'1~',
2320 2322 dir=os.path.dirname(testfile),
2321 2323 )
2322 2324 os.close(fd)
2323 2325 f2 = b'%s2~' % f1[:-2]
2324 2326
2325 2327 oslink(f1, f2)
2326 2328 # nlinks() may behave differently for files on Windows shares if
2327 2329 # the file is open.
2328 2330 fp = posixfile(f2)
2329 2331 return nlinks(f2) > 1
2330 2332 except OSError:
2331 2333 return False
2332 2334 finally:
2333 2335 if fp is not None:
2334 2336 fp.close()
2335 2337 for f in (f1, f2):
2336 2338 try:
2337 2339 if f is not None:
2338 2340 os.unlink(f)
2339 2341 except OSError:
2340 2342 pass
2341 2343
2342 2344
2343 2345 def endswithsep(path):
2344 2346 # type: (bytes) -> bool
2345 2347 '''Check path ends with os.sep or os.altsep.'''
2346 2348 return bool( # help pytype
2347 2349 path.endswith(pycompat.ossep)
2348 2350 or pycompat.osaltsep
2349 2351 and path.endswith(pycompat.osaltsep)
2350 2352 )
2351 2353
2352 2354
2353 2355 def splitpath(path):
2354 2356 # type: (bytes) -> List[bytes]
2355 2357 """Split path by os.sep.
2356 2358 Note that this function does not use os.altsep because it is
2357 2359 intended as a simple equivalent of "xxx.split(os.sep)".
2358 2360 It is recommended to use os.path.normpath() before using this
2359 2361 function if needed."""
2360 2362 return path.split(pycompat.ossep)
2361 2363
2362 2364
2363 2365 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2364 2366 """Create a temporary file with the same contents from name
2365 2367
2366 2368 The permission bits are copied from the original file.
2367 2369
2368 2370 If the temporary file is going to be truncated immediately, you
2369 2371 can use emptyok=True as an optimization.
2370 2372
2371 2373 Returns the name of the temporary file.
2372 2374 """
2373 2375 d, fn = os.path.split(name)
2374 2376 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2375 2377 os.close(fd)
2376 2378 # Temporary files are created with mode 0600, which is usually not
2377 2379 # what we want. If the original file already exists, just copy
2378 2380 # its mode. Otherwise, manually obey umask.
2379 2381 copymode(name, temp, createmode, enforcewritable)
2380 2382
2381 2383 if emptyok:
2382 2384 return temp
2383 2385 try:
2384 2386 try:
2385 2387 ifp = posixfile(name, b"rb")
2386 2388 except IOError as inst:
2387 2389 if inst.errno == errno.ENOENT:
2388 2390 return temp
2389 2391 if not getattr(inst, 'filename', None):
2390 2392 inst.filename = name
2391 2393 raise
2392 2394 ofp = posixfile(temp, b"wb")
2393 2395 for chunk in filechunkiter(ifp):
2394 2396 ofp.write(chunk)
2395 2397 ifp.close()
2396 2398 ofp.close()
2397 2399 except: # re-raises
2398 2400 try:
2399 2401 os.unlink(temp)
2400 2402 except OSError:
2401 2403 pass
2402 2404 raise
2403 2405 return temp
2404 2406
2405 2407
2406 2408 class filestat(object):
2407 2409 """help to exactly detect change of a file
2408 2410
2409 2411 'stat' attribute is the result of 'os.stat()' if the specified 'path'
2410 2412 exists. Otherwise, it is None. This saves the caller a separate
2411 2413 'exists()' check before using this class.
2412 2414 """
2413 2415
2414 2416 def __init__(self, stat):
2415 2417 self.stat = stat
2416 2418
2417 2419 @classmethod
2418 2420 def frompath(cls, path):
2419 2421 try:
2420 2422 stat = os.stat(path)
2421 2423 except OSError as err:
2422 2424 if err.errno != errno.ENOENT:
2423 2425 raise
2424 2426 stat = None
2425 2427 return cls(stat)
2426 2428
2427 2429 @classmethod
2428 2430 def fromfp(cls, fp):
2429 2431 stat = os.fstat(fp.fileno())
2430 2432 return cls(stat)
2431 2433
2432 2434 __hash__ = object.__hash__
2433 2435
2434 2436 def __eq__(self, old):
2435 2437 try:
2436 2438 # if ambiguity between stat of new and old file is
2437 2439 # avoided, comparison of size, ctime and mtime is enough
2438 2440 # to exactly detect change of a file regardless of platform
2439 2441 return (
2440 2442 self.stat.st_size == old.stat.st_size
2441 2443 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2442 2444 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2443 2445 )
2444 2446 except AttributeError:
2445 2447 pass
2446 2448 try:
2447 2449 return self.stat is None and old.stat is None
2448 2450 except AttributeError:
2449 2451 return False
2450 2452
2451 2453 def isambig(self, old):
2452 2454 """Examine whether new (= self) stat is ambiguous against old one
2453 2455
2454 2456 "S[N]" below means stat of a file at N-th change:
2455 2457
2456 2458 - S[n-1].ctime < S[n].ctime: can detect change of a file
2457 2459 - S[n-1].ctime == S[n].ctime
2458 2460 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2459 2461 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2460 2462 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2461 2463 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2462 2464
2463 2465 Case (*2) above means that a file was changed twice or more
2464 2466 within the same second (= S[n-1].ctime), and comparison of
2465 2467 timestamps is ambiguous.
2466 2468
2467 2469 The basic idea to avoid such ambiguity is "advance mtime 1 sec, if
2468 2470 timestamp is ambiguous".
2469 2471
2470 2472 But advancing mtime only in case (*2) doesn't work as
2471 2473 expected, because naturally advanced S[n].mtime in case (*1)
2472 2474 might be equal to manually advanced S[n-1 or earlier].mtime.
2473 2475
2474 2476 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2475 2477 treated as ambiguous regardless of mtime, to avoid overlooking
2476 2478 changes caused by collisions between such mtimes.
2477 2479
2478 2480 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2479 2481 S[n].mtime", even if size of a file isn't changed.
2480 2482 """
2481 2483 try:
2482 2484 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2483 2485 except AttributeError:
2484 2486 return False
2485 2487
2486 2488 def avoidambig(self, path, old):
2487 2489 """Change file stat of specified path to avoid ambiguity
2488 2490
2489 2491 'old' should be previous filestat of 'path'.
2490 2492
2491 2493 This skips avoiding ambiguity, if a process doesn't have
2492 2494 appropriate privileges for 'path'. This returns False in this
2493 2495 case.
2494 2496
2495 2497 Otherwise, this returns True, as "ambiguity is avoided".
2496 2498 """
2497 2499 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2498 2500 try:
2499 2501 os.utime(path, (advanced, advanced))
2500 2502 except OSError as inst:
2501 2503 if inst.errno == errno.EPERM:
2502 2504 # utime() on the file created by another user causes EPERM,
2503 2505 # if a process doesn't have appropriate privileges
2504 2506 return False
2505 2507 raise
2506 2508 return True
2507 2509
2508 2510 def __ne__(self, other):
2509 2511 return not self == other
2510 2512
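# A sketch of change detection with filestat (the path is illustrative):
#
#     old = filestat.frompath(b'somefile')
#     # ... the file may be rewritten here ...
#     new = filestat.frompath(b'somefile')
#     new == old        # False once size/ctime/mtime differ
#     new.isambig(old)  # True when both versions share the same ctime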
2511 2513
2512 2514 class atomictempfile(object):
2513 2515 """writable file object that atomically updates a file
2514 2516
2515 2517 All writes will go to a temporary copy of the original file. Call
2516 2518 close() when you are done writing, and atomictempfile will rename
2517 2519 the temporary copy to the original name, making the changes
2518 2520 visible. If the object is destroyed without being closed, all your
2519 2521 writes are discarded.
2520 2522
2521 2523 checkambig argument of constructor is used with filestat, and is
2522 2524 useful only if target file is guarded by any lock (e.g. repo.lock
2523 2525 or repo.wlock).
2524 2526 """
2525 2527
2526 2528 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2527 2529 self.__name = name # permanent name
2528 2530 self._tempname = mktempcopy(
2529 2531 name,
2530 2532 emptyok=(b'w' in mode),
2531 2533 createmode=createmode,
2532 2534 enforcewritable=(b'w' in mode),
2533 2535 )
2534 2536
2535 2537 self._fp = posixfile(self._tempname, mode)
2536 2538 self._checkambig = checkambig
2537 2539
2538 2540 # delegated methods
2539 2541 self.read = self._fp.read
2540 2542 self.write = self._fp.write
2541 2543 self.seek = self._fp.seek
2542 2544 self.tell = self._fp.tell
2543 2545 self.fileno = self._fp.fileno
2544 2546
2545 2547 def close(self):
2546 2548 if not self._fp.closed:
2547 2549 self._fp.close()
2548 2550 filename = localpath(self.__name)
2549 2551 oldstat = self._checkambig and filestat.frompath(filename)
2550 2552 if oldstat and oldstat.stat:
2551 2553 rename(self._tempname, filename)
2552 2554 newstat = filestat.frompath(filename)
2553 2555 if newstat.isambig(oldstat):
2554 2556 # stat of changed file is ambiguous to original one
2555 2557 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2556 2558 os.utime(filename, (advanced, advanced))
2557 2559 else:
2558 2560 rename(self._tempname, filename)
2559 2561
2560 2562 def discard(self):
2561 2563 if not self._fp.closed:
2562 2564 try:
2563 2565 os.unlink(self._tempname)
2564 2566 except OSError:
2565 2567 pass
2566 2568 self._fp.close()
2567 2569
2568 2570 def __del__(self):
2569 2571 if safehasattr(self, '_fp'): # constructor actually did something
2570 2572 self.discard()
2571 2573
2572 2574 def __enter__(self):
2573 2575 return self
2574 2576
2575 2577 def __exit__(self, exctype, excvalue, traceback):
2576 2578 if exctype is not None:
2577 2579 self.discard()
2578 2580 else:
2579 2581 self.close()
2580 2582
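# Typical use as a context manager (the filename is illustrative):
#
#     with atomictempfile(b'somefile', mode=b'wb') as fp:
#         fp.write(b'data\n')
#     # on a normal exit the temporary copy is renamed over b'somefile';
#     # on an exception all writes are discarded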
2581 2583
2582 2584 def unlinkpath(f, ignoremissing=False, rmdir=True):
2583 2585 # type: (bytes, bool, bool) -> None
2584 2586 """unlink and remove the directory if it is empty"""
2585 2587 if ignoremissing:
2586 2588 tryunlink(f)
2587 2589 else:
2588 2590 unlink(f)
2589 2591 if rmdir:
2590 2592 # try removing directories that might now be empty
2591 2593 try:
2592 2594 removedirs(os.path.dirname(f))
2593 2595 except OSError:
2594 2596 pass
2595 2597
2596 2598
2597 2599 def tryunlink(f):
2598 2600 # type: (bytes) -> None
2599 2601 """Attempt to remove a file, ignoring ENOENT errors."""
2600 2602 try:
2601 2603 unlink(f)
2602 2604 except OSError as e:
2603 2605 if e.errno != errno.ENOENT:
2604 2606 raise
2605 2607
2606 2608
2607 2609 def makedirs(name, mode=None, notindexed=False):
2608 2610 # type: (bytes, Optional[int], bool) -> None
2609 2611 """recursive directory creation with parent mode inheritance
2610 2612
2611 2613 Newly created directories are marked as "not to be indexed by
2612 2614 the content indexing service", if ``notindexed`` is specified
2613 2615 for "write" mode access.
2614 2616 """
2615 2617 try:
2616 2618 makedir(name, notindexed)
2617 2619 except OSError as err:
2618 2620 if err.errno == errno.EEXIST:
2619 2621 return
2620 2622 if err.errno != errno.ENOENT or not name:
2621 2623 raise
2622 2624 parent = os.path.dirname(os.path.abspath(name))
2623 2625 if parent == name:
2624 2626 raise
2625 2627 makedirs(parent, mode, notindexed)
2626 2628 try:
2627 2629 makedir(name, notindexed)
2628 2630 except OSError as err:
2629 2631 # Catch EEXIST to handle races
2630 2632 if err.errno == errno.EEXIST:
2631 2633 return
2632 2634 raise
2633 2635 if mode is not None:
2634 2636 os.chmod(name, mode)
2635 2637
2636 2638
2637 2639 def readfile(path):
2638 2640 # type: (bytes) -> bytes
2639 2641 with open(path, b'rb') as fp:
2640 2642 return fp.read()
2641 2643
2642 2644
2643 2645 def writefile(path, text):
2644 2646 # type: (bytes, bytes) -> None
2645 2647 with open(path, b'wb') as fp:
2646 2648 fp.write(text)
2647 2649
2648 2650
2649 2651 def appendfile(path, text):
2650 2652 # type: (bytes, bytes) -> None
2651 2653 with open(path, b'ab') as fp:
2652 2654 fp.write(text)
2653 2655
2654 2656
2655 2657 class chunkbuffer(object):
2656 2658 """Allow arbitrary sized chunks of data to be efficiently read from an
2657 2659 iterator over chunks of arbitrary size."""
2658 2660
2659 2661 def __init__(self, in_iter):
2660 2662 """in_iter is the iterator that's iterating over the input chunks."""
2661 2663
2662 2664 def splitbig(chunks):
2663 2665 for chunk in chunks:
2664 2666 if len(chunk) > 2 ** 20:
2665 2667 pos = 0
2666 2668 while pos < len(chunk):
2667 2669 end = pos + 2 ** 18
2668 2670 yield chunk[pos:end]
2669 2671 pos = end
2670 2672 else:
2671 2673 yield chunk
2672 2674
2673 2675 self.iter = splitbig(in_iter)
2674 2676 self._queue = collections.deque()
2675 2677 self._chunkoffset = 0
2676 2678
2677 2679 def read(self, l=None):
2678 2680 """Read L bytes of data from the iterator of chunks of data.
2679 2681 Returns less than L bytes if the iterator runs dry.
2680 2682
2681 2683 If size parameter is omitted, read everything"""
2682 2684 if l is None:
2683 2685 return b''.join(self.iter)
2684 2686
2685 2687 left = l
2686 2688 buf = []
2687 2689 queue = self._queue
2688 2690 while left > 0:
2689 2691 # refill the queue
2690 2692 if not queue:
2691 2693 target = 2 ** 18
2692 2694 for chunk in self.iter:
2693 2695 queue.append(chunk)
2694 2696 target -= len(chunk)
2695 2697 if target <= 0:
2696 2698 break
2697 2699 if not queue:
2698 2700 break
2699 2701
2700 2702 # The easy way to do this would be to queue.popleft(), modify the
2701 2703 # chunk (if necessary), then queue.appendleft(). However, for cases
2702 2704 # where we read partial chunk content, this incurs 2 dequeue
2703 2705 # mutations and creates a new str for the remaining chunk in the
2704 2706 # queue. Our code below avoids this overhead.
2705 2707
2706 2708 chunk = queue[0]
2707 2709 chunkl = len(chunk)
2708 2710 offset = self._chunkoffset
2709 2711
2710 2712 # Use full chunk.
2711 2713 if offset == 0 and left >= chunkl:
2712 2714 left -= chunkl
2713 2715 queue.popleft()
2714 2716 buf.append(chunk)
2715 2717 # self._chunkoffset remains at 0.
2716 2718 continue
2717 2719
2718 2720 chunkremaining = chunkl - offset
2719 2721
2720 2722 # Use all of unconsumed part of chunk.
2721 2723 if left >= chunkremaining:
2722 2724 left -= chunkremaining
2723 2725 queue.popleft()
2724 2726 # offset == 0 is enabled by block above, so this won't merely
2725 2727 # copy via ``chunk[0:]``.
2726 2728 buf.append(chunk[offset:])
2727 2729 self._chunkoffset = 0
2728 2730
2729 2731 # Partial chunk needed.
2730 2732 else:
2731 2733 buf.append(chunk[offset : offset + left])
2732 2734 self._chunkoffset += left
2733 2735 left -= chunkremaining
2734 2736
2735 2737 return b''.join(buf)
2736 2738
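# For example, reads can straddle the input chunk boundaries:
#
#     cb = chunkbuffer(iter([b'abc', b'defg']))
#     cb.read(2)   # -> b'ab'
#     cb.read(4)   # -> b'cdef'
#     cb.read(10)  # -> b'g' (the iterator ran dry)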
2737 2739
2738 2740 def filechunkiter(f, size=131072, limit=None):
2739 2741 """Create a generator that produces the data in the file size
2740 2742 (default 131072) bytes at a time, up to optional limit (default is
2741 2743 to read all data). Chunks may be less than size bytes if the
2742 2744 chunk is the last chunk in the file, or the file is a socket or
2743 2745 some other type of file that sometimes reads less data than is
2744 2746 requested."""
2745 2747 assert size >= 0
2746 2748 assert limit is None or limit >= 0
2747 2749 while True:
2748 2750 if limit is None:
2749 2751 nbytes = size
2750 2752 else:
2751 2753 nbytes = min(limit, size)
2752 2754 s = nbytes and f.read(nbytes)
2753 2755 if not s:
2754 2756 break
2755 2757 if limit:
2756 2758 limit -= len(s)
2757 2759 yield s
2758 2760
2759 2761
2760 2762 class cappedreader(object):
2761 2763 """A file object proxy that allows reading up to N bytes.
2762 2764
2763 2765 Given a source file object, instances of this type allow reading up to
2764 2766 N bytes from that source file object. Attempts to read past the allowed
2765 2767 limit are treated as EOF.
2766 2768
2767 2769 It is assumed that I/O is not performed on the original file object
2768 2770 in addition to I/O that is performed by this instance. If there is,
2769 2771 state tracking will get out of sync and unexpected results will ensue.
2770 2772 """
2771 2773
2772 2774 def __init__(self, fh, limit):
2773 2775 """Allow reading up to <limit> bytes from <fh>."""
2774 2776 self._fh = fh
2775 2777 self._left = limit
2776 2778
2777 2779 def read(self, n=-1):
2778 2780 if not self._left:
2779 2781 return b''
2780 2782
2781 2783 if n < 0:
2782 2784 n = self._left
2783 2785
2784 2786 data = self._fh.read(min(n, self._left))
2785 2787 self._left -= len(data)
2786 2788 assert self._left >= 0
2787 2789
2788 2790 return data
2789 2791
2790 2792 def readinto(self, b):
2791 2793 res = self.read(len(b))
2792 2794 if res is None:
2793 2795 return None
2794 2796
2795 2797 b[0 : len(res)] = res
2796 2798 return len(res)
2797 2799
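# For example, wrapping an in-memory file (io.BytesIO stands in for any
# file object here):
#
#     capped = cappedreader(io.BytesIO(b'0123456789'), 4)
#     capped.read(100)  # -> b'0123' (stops at the 4-byte limit)
#     capped.read(100)  # -> b'' (past the limit reads as EOF)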
2798 2800
2799 2801 def unitcountfn(*unittable):
2800 2802 '''return a function that renders a readable count of some quantity'''
2801 2803
2802 2804 def go(count):
2803 2805 for multiplier, divisor, format in unittable:
2804 2806 if abs(count) >= divisor * multiplier:
2805 2807 return format % (count / float(divisor))
2806 2808 return unittable[-1][2] % count
2807 2809
2808 2810 return go
2809 2811
2810 2812
2811 2813 def processlinerange(fromline, toline):
2812 2814 # type: (int, int) -> Tuple[int, int]
2813 2815 """Check that linerange <fromline>:<toline> makes sense and return a
2814 2816 0-based range.
2815 2817
2816 2818 >>> processlinerange(10, 20)
2817 2819 (9, 20)
2818 2820 >>> processlinerange(2, 1)
2819 2821 Traceback (most recent call last):
2820 2822 ...
2821 2823 ParseError: line range must be positive
2822 2824 >>> processlinerange(0, 5)
2823 2825 Traceback (most recent call last):
2824 2826 ...
2825 2827 ParseError: fromline must be strictly positive
2826 2828 """
2827 2829 if toline - fromline < 0:
2828 2830 raise error.ParseError(_(b"line range must be positive"))
2829 2831 if fromline < 1:
2830 2832 raise error.ParseError(_(b"fromline must be strictly positive"))
2831 2833 return fromline - 1, toline
2832 2834
2833 2835
2834 2836 bytecount = unitcountfn(
2835 2837 (100, 1 << 30, _(b'%.0f GB')),
2836 2838 (10, 1 << 30, _(b'%.1f GB')),
2837 2839 (1, 1 << 30, _(b'%.2f GB')),
2838 2840 (100, 1 << 20, _(b'%.0f MB')),
2839 2841 (10, 1 << 20, _(b'%.1f MB')),
2840 2842 (1, 1 << 20, _(b'%.2f MB')),
2841 2843 (100, 1 << 10, _(b'%.0f KB')),
2842 2844 (10, 1 << 10, _(b'%.1f KB')),
2843 2845 (1, 1 << 10, _(b'%.2f KB')),
2844 2846 (1, 1, _(b'%.0f bytes')),
2845 2847 )
2846 2848
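# For example:
#
#     bytecount(100)       # -> b'100 bytes'
#     bytecount(4096)      # -> b'4.00 KB'
#     bytecount(15 << 20)  # -> b'15.0 MB'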
2847 2849
2848 2850 class transformingwriter(object):
2849 2851 """Writable file wrapper to transform data by function"""
2850 2852
2851 2853 def __init__(self, fp, encode):
2852 2854 self._fp = fp
2853 2855 self._encode = encode
2854 2856
2855 2857 def close(self):
2856 2858 self._fp.close()
2857 2859
2858 2860 def flush(self):
2859 2861 self._fp.flush()
2860 2862
2861 2863 def write(self, data):
2862 2864 return self._fp.write(self._encode(data))
2863 2865
2864 2866
2865 2867 # Matches a single EOL which can either be a CRLF where repeated CR
2866 2868 # are removed or a LF. We do not care about old Macintosh files, so a
2867 2869 # stray CR is an error.
2868 2870 _eolre = remod.compile(br'\r*\n')
2869 2871
2870 2872
2871 2873 def tolf(s):
2872 2874 # type: (bytes) -> bytes
2873 2875 return _eolre.sub(b'\n', s)
2874 2876
2875 2877
2876 2878 def tocrlf(s):
2877 2879 # type: (bytes) -> bytes
2878 2880 return _eolre.sub(b'\r\n', s)
2879 2881
2880 2882
2881 2883 def _crlfwriter(fp):
2882 2884 return transformingwriter(fp, tocrlf)
2883 2885
2884 2886
2885 2887 if pycompat.oslinesep == b'\r\n':
2886 2888 tonativeeol = tocrlf
2887 2889 fromnativeeol = tolf
2888 2890 nativeeolwriter = _crlfwriter
2889 2891 else:
2890 2892 tonativeeol = pycompat.identity
2891 2893 fromnativeeol = pycompat.identity
2892 2894 nativeeolwriter = pycompat.identity
2893 2895
2894 2896 if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
2895 2897 3,
2896 2898 0,
2897 2899 ):
2898 2900 # There is an issue in CPython that some IO methods do not handle EINTR
2899 2901 # correctly. The following table shows what CPython version (and functions)
2900 2902 # are affected (buggy: has the EINTR bug, okay: otherwise):
2901 2903 #
2902 2904 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2903 2905 # --------------------------------------------------
2904 2906 # fp.__iter__ | buggy | buggy | okay
2905 2907 # fp.read* | buggy | okay [1] | okay
2906 2908 #
2907 2909 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2908 2910 #
2909 2911 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2910 2912 # like "read*" work fine, as we do not support Python < 2.7.4.
2911 2913 #
2912 2914 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2913 2915 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2914 2916 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2915 2917 # fp.__iter__ but not other fp.read* methods.
2916 2918 #
2917 2919 # On modern systems like Linux, the "read" syscall cannot be interrupted
2918 2920 # when reading "fast" files like on-disk files. So the EINTR issue only
2919 2921 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2920 2922 # files approximately as "fast" files and use the fast (unsafe) code path,
2921 2923 # to minimize the performance impact.
2922 2924
2923 2925 def iterfile(fp):
2924 2926 fastpath = True
2925 2927 if type(fp) is file:
2926 2928 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2927 2929 if fastpath:
2928 2930 return fp
2929 2931 else:
2930 2932 # fp.readline deals with EINTR correctly, use it as a workaround.
2931 2933 return iter(fp.readline, b'')
2932 2934
2933 2935
2934 2936 else:
2935 2937 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2936 2938 def iterfile(fp):
2937 2939 return fp
2938 2940
2939 2941
2940 2942 def iterlines(iterator):
2941 2943 # type: (Iterator[bytes]) -> Iterator[bytes]
2942 2944 for chunk in iterator:
2943 2945 for line in chunk.splitlines():
2944 2946 yield line
2945 2947
2946 2948
2947 2949 def expandpath(path):
2948 2950 # type: (bytes) -> bytes
2949 2951 return os.path.expanduser(os.path.expandvars(path))
2950 2952
2951 2953
2952 2954 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2953 2955 """Return the result of interpolating items in the mapping into string s.
2954 2956
2955 2957 prefix is a single character string, or a two character string with
2956 2958 a backslash as the first character if the prefix needs to be escaped in
2957 2959 a regular expression.
2958 2960
2959 2961 fn is an optional function that will be applied to the replacement text
2960 2962 just before replacement.
2961 2963
2962 2964 escape_prefix is an optional flag that allows using doubled prefix for
2963 2965 its escaping.
2964 2966 """
2965 2967 fn = fn or (lambda s: s)
2966 2968 patterns = b'|'.join(mapping.keys())
2967 2969 if escape_prefix:
2968 2970 patterns += b'|' + prefix
2969 2971 if len(prefix) > 1:
2970 2972 prefix_char = prefix[1:]
2971 2973 else:
2972 2974 prefix_char = prefix
2973 2975 mapping[prefix_char] = prefix_char
2974 2976 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2975 2977 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2976 2978
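# For example (the mapping and input string are illustrative):
#
#     interpolate(b'%', {b'user': b'alice'}, b'hello %user')
#     # -> b'hello alice'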
2977 2979
2978 2980 def getport(*args, **kwargs):
2979 2981 msg = b'getport(...) moved to mercurial.utils.urlutil'
2980 2982 nouideprecwarn(msg, b'6.0', stacklevel=2)
2981 2983 return urlutil.getport(*args, **kwargs)
2982 2984
2983 2985
2984 2986 def url(*args, **kwargs):
2985 2987 msg = b'url(...) moved to mercurial.utils.urlutil'
2986 2988 nouideprecwarn(msg, b'6.0', stacklevel=2)
2987 2989 return urlutil.url(*args, **kwargs)
2988 2990
2989 2991
2990 2992 def hasscheme(*args, **kwargs):
2991 2993 msg = b'hasscheme(...) moved to mercurial.utils.urlutil'
2992 2994 nouideprecwarn(msg, b'6.0', stacklevel=2)
2993 2995 return urlutil.hasscheme(*args, **kwargs)
2994 2996
2995 2997
2996 2998 def hasdriveletter(*args, **kwargs):
2997 2999 msg = b'hasdriveletter(...) moved to mercurial.utils.urlutil'
2998 3000 nouideprecwarn(msg, b'6.0', stacklevel=2)
2999 3001 return urlutil.hasdriveletter(*args, **kwargs)
3000 3002
3001 3003
3002 3004 def urllocalpath(*args, **kwargs):
3003 3005 msg = b'urllocalpath(...) moved to mercurial.utils.urlutil'
3004 3006 nouideprecwarn(msg, b'6.0', stacklevel=2)
3005 3007 return urlutil.urllocalpath(*args, **kwargs)
3006 3008
3007 3009
3008 3010 def checksafessh(*args, **kwargs):
3009 3011 msg = b'checksafessh(...) moved to mercurial.utils.urlutil'
3010 3012 nouideprecwarn(msg, b'6.0', stacklevel=2)
3011 3013 return urlutil.checksafessh(*args, **kwargs)
3012 3014
3013 3015
3014 3016 def hidepassword(*args, **kwargs):
3015 3017 msg = b'hidepassword(...) moved to mercurial.utils.urlutil'
3016 3018 nouideprecwarn(msg, b'6.0', stacklevel=2)
3017 3019 return urlutil.hidepassword(*args, **kwargs)
3018 3020
3019 3021
3020 3022 def removeauth(*args, **kwargs):
3021 3023 msg = b'removeauth(...) moved to mercurial.utils.urlutil'
3022 3024 nouideprecwarn(msg, b'6.0', stacklevel=2)
3023 3025 return urlutil.removeauth(*args, **kwargs)
3024 3026
3025 3027
3026 3028 timecount = unitcountfn(
3027 3029 (1, 1e3, _(b'%.0f s')),
3028 3030 (100, 1, _(b'%.1f s')),
3029 3031 (10, 1, _(b'%.2f s')),
3030 3032 (1, 1, _(b'%.3f s')),
3031 3033 (100, 0.001, _(b'%.1f ms')),
3032 3034 (10, 0.001, _(b'%.2f ms')),
3033 3035 (1, 0.001, _(b'%.3f ms')),
3034 3036 (100, 0.000001, _(b'%.1f us')),
3035 3037 (10, 0.000001, _(b'%.2f us')),
3036 3038 (1, 0.000001, _(b'%.3f us')),
3037 3039 (100, 0.000000001, _(b'%.1f ns')),
3038 3040 (10, 0.000000001, _(b'%.2f ns')),
3039 3041 (1, 0.000000001, _(b'%.3f ns')),
3040 3042 )
3041 3043
3042 3044
3043 3045 @attr.s
3044 3046 class timedcmstats(object):
3045 3047 """Stats information produced by the timedcm context manager on entering."""
3046 3048
3047 3049 # the starting value of the timer as a float (meaning and resolution are
3048 3050 # platform dependent, see util.timer)
3049 3051 start = attr.ib(default=attr.Factory(lambda: timer()))
3050 3052 # the number of seconds as a floating point value; starts at 0, updated when
3051 3053 # the context is exited.
3052 3054 elapsed = attr.ib(default=0)
3053 3055 # the number of nested timedcm context managers.
3054 3056 level = attr.ib(default=1)
3055 3057
3056 3058 def __bytes__(self):
3057 3059 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
3058 3060
3059 3061 __str__ = encoding.strmethod(__bytes__)
3060 3062
3061 3063
3062 3064 @contextlib.contextmanager
3063 3065 def timedcm(whencefmt, *whenceargs):
3064 3066 """A context manager that produces timing information for a given context.
3065 3067
3066 3068 On entering a timedcmstats instance is produced.
3067 3069
3068 3070 This context manager is reentrant.
3069 3071
3070 3072 """
3071 3073 # track nested context managers
3072 3074 timedcm._nested += 1
3073 3075 timing_stats = timedcmstats(level=timedcm._nested)
3074 3076 try:
3075 3077 with tracing.log(whencefmt, *whenceargs):
3076 3078 yield timing_stats
3077 3079 finally:
3078 3080 timing_stats.elapsed = timer() - timing_stats.start
3079 3081 timedcm._nested -= 1
3080 3082
3081 3083
3082 3084 timedcm._nested = 0
3083 3085
3084 3086
3085 3087 def timed(func):
3086 3088 """Report the execution time of a function call to stderr.
3087 3089
3088 3090 During development, use as a decorator when you need to measure
3089 3091 the cost of a function, e.g. as follows:
3090 3092
3091 3093 @util.timed
3092 3094 def foo(a, b, c):
3093 3095 pass
3094 3096 """
3095 3097
3096 3098 def wrapper(*args, **kwargs):
3097 3099 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3098 3100 result = func(*args, **kwargs)
3099 3101 stderr = procutil.stderr
3100 3102 stderr.write(
3101 3103 b'%s%s: %s\n'
3102 3104 % (
3103 3105 b' ' * time_stats.level * 2,
3104 3106 pycompat.bytestr(func.__name__),
3105 3107 time_stats,
3106 3108 )
3107 3109 )
3108 3110 return result
3109 3111
3110 3112 return wrapper
3111 3113
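# A sketch of the context-manager form (the workload is illustrative):
#
#     with timedcm(b'building %s', b'something') as stats:
#         do_work()
#     # stats.elapsed now holds the duration in (fractional) seconds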
3112 3114
3113 3115 _sizeunits = (
3114 3116 (b'm', 2 ** 20),
3115 3117 (b'k', 2 ** 10),
3116 3118 (b'g', 2 ** 30),
3117 3119 (b'kb', 2 ** 10),
3118 3120 (b'mb', 2 ** 20),
3119 3121 (b'gb', 2 ** 30),
3120 3122 (b'b', 1),
3121 3123 )
3122 3124
3123 3125
3124 3126 def sizetoint(s):
3125 3127 # type: (bytes) -> int
3126 3128 """Convert a space specifier to a byte count.
3127 3129
3128 3130 >>> sizetoint(b'30')
3129 3131 30
3130 3132 >>> sizetoint(b'2.2kb')
3131 3133 2252
3132 3134 >>> sizetoint(b'6M')
3133 3135 6291456
3134 3136 """
3135 3137 t = s.strip().lower()
3136 3138 try:
3137 3139 for k, u in _sizeunits:
3138 3140 if t.endswith(k):
3139 3141 return int(float(t[: -len(k)]) * u)
3140 3142 return int(t)
3141 3143 except ValueError:
3142 3144 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3143 3145
3144 3146
3145 3147 class hooks(object):
3146 3148 """A collection of hook functions that can be used to extend a
3147 3149 function's behavior. Hooks are called in lexicographic order,
3148 3150 based on the names of their sources."""
3149 3151
3150 3152 def __init__(self):
3151 3153 self._hooks = []
3152 3154
3153 3155 def add(self, source, hook):
3154 3156 self._hooks.append((source, hook))
3155 3157
3156 3158 def __call__(self, *args):
3157 3159 self._hooks.sort(key=lambda x: x[0])
3158 3160 results = []
3159 3161 for source, hook in self._hooks:
3160 3162 results.append(hook(*args))
3161 3163 return results
3162 3164
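# For example, hooks run sorted by their source name:
#
#     h = hooks()
#     h.add(b'b-late', lambda x: x + 1)
#     h.add(b'a-early', lambda x: x * 2)
#     h(3)  # -> [6, 4]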
3163 3165
3164 3166 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3165 3167 """Yields lines for a nicely formatted stacktrace.
3166 3168 Skips the 'skip' last entries, then returns the last 'depth' entries.
3167 3169 Each file+linenumber is formatted according to fileline.
3168 3170 Each line is formatted according to line.
3169 3171 If line is None, it yields:
3170 3172 length of longest filepath+line number,
3171 3173 filepath+linenumber,
3172 3174 function
3173 3175
3174 3176 Not to be used in production code, but very convenient while developing.
3175 3177 """
3176 3178 entries = [
3177 3179 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3178 3180 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3179 3181 ][-depth:]
3180 3182 if entries:
3181 3183 fnmax = max(len(entry[0]) for entry in entries)
3182 3184 for fnln, func in entries:
3183 3185 if line is None:
3184 3186 yield (fnmax, fnln, func)
3185 3187 else:
3186 3188 yield line % (fnmax, fnln, func)
3187 3189
3188 3190
3189 3191 def debugstacktrace(
3190 3192 msg=b'stacktrace',
3191 3193 skip=0,
3192 3194 f=procutil.stderr,
3193 3195 otherf=procutil.stdout,
3194 3196 depth=0,
3195 3197 prefix=b'',
3196 3198 ):
3197 3199 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3198 3200 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3199 3201 By default it will flush stdout first.
3200 3202 It can be used everywhere and intentionally does not require a ui object.
3201 3203 Not to be used in production code, but very convenient while developing.
3202 3204 """
3203 3205 if otherf:
3204 3206 otherf.flush()
3205 3207 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3206 3208 for line in getstackframes(skip + 1, depth=depth):
3207 3209 f.write(prefix + line)
3208 3210 f.flush()
3209 3211
3210 3212
3211 3213 # convenient shortcut
3212 3214 dst = debugstacktrace
3213 3215
3214 3216
3215 3217 def safename(f, tag, ctx, others=None):
3216 3218 """
3217 3219 Generate a name that it is safe to rename f to in the given context.
3218 3220
3219 3221 f: filename to rename
3220 3222 tag: a string tag that will be included in the new name
3221 3223 ctx: a context, in which the new name must not exist
3222 3224 others: a set of other filenames that the new name must not be in
3223 3225
3224 3226 Returns a file name of the form oldname~tag[~number] which does not exist
3225 3227 in the provided context and is not in the set of other names.
3226 3228 """
3227 3229 if others is None:
3228 3230 others = set()
3229 3231
3230 3232 fn = b'%s~%s' % (f, tag)
3231 3233 if fn not in ctx and fn not in others:
3232 3234 return fn
3233 3235 for n in itertools.count(1):
3234 3236 fn = b'%s~%s~%s' % (f, tag, n)
3235 3237 if fn not in ctx and fn not in others:
3236 3238 return fn
3237 3239
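# For example, with a ctx-like object supporting `in` (all names are
# illustrative):
#
#     safename(b'foo', b'merge', ctx)
#     # -> b'foo~merge', or b'foo~merge~1', b'foo~merge~2', ... if taken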
3238 3240
3239 3241 def readexactly(stream, n):
3240 3242 '''read n bytes from stream.read and abort if less was available'''
3241 3243 s = stream.read(n)
3242 3244 if len(s) < n:
3243 3245 raise error.Abort(
3244 3246 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3245 3247 % (len(s), n)
3246 3248 )
3247 3249 return s
3248 3250
3249 3251
3250 3252 def uvarintencode(value):
3251 3253 """Encode an unsigned integer value to a varint.
3252 3254
3253 3255 A varint is a variable length integer of 1 or more bytes. Each byte
3254 3256 except the last has the most significant bit set. The lower 7 bits of
3255 3257 each byte store the 2's complement representation, least significant group
3256 3258 first.
3257 3259
3258 3260 >>> uvarintencode(0)
3259 3261 '\\x00'
3260 3262 >>> uvarintencode(1)
3261 3263 '\\x01'
3262 3264 >>> uvarintencode(127)
3263 3265 '\\x7f'
3264 3266 >>> uvarintencode(1337)
3265 3267 '\\xb9\\n'
3266 3268 >>> uvarintencode(65536)
3267 3269 '\\x80\\x80\\x04'
3268 3270 >>> uvarintencode(-1)
3269 3271 Traceback (most recent call last):
3270 3272 ...
3271 3273 ProgrammingError: negative value for uvarint: -1
3272 3274 """
3273 3275 if value < 0:
3274 3276 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3275 3277 bits = value & 0x7F
3276 3278 value >>= 7
3277 3279 bytes = []
3278 3280 while value:
3279 3281 bytes.append(pycompat.bytechr(0x80 | bits))
3280 3282 bits = value & 0x7F
3281 3283 value >>= 7
3282 3284 bytes.append(pycompat.bytechr(bits))
3283 3285
3284 3286 return b''.join(bytes)
3285 3287
3286 3288
3287 3289 def uvarintdecodestream(fh):
3288 3290 """Decode an unsigned variable length integer from a stream.
3289 3291
3290 3292 The passed argument is anything that has a ``.read(N)`` method.
3291 3293
3292 3294 >>> try:
3293 3295 ... from StringIO import StringIO as BytesIO
3294 3296 ... except ImportError:
3295 3297 ... from io import BytesIO
3296 3298 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3297 3299 0
3298 3300 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3299 3301 1
3300 3302 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3301 3303 127
3302 3304 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3303 3305 1337
3304 3306 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3305 3307 65536
3306 3308 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3307 3309 Traceback (most recent call last):
3308 3310 ...
3309 3311 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3310 3312 """
3311 3313 result = 0
3312 3314 shift = 0
3313 3315 while True:
3314 3316 byte = ord(readexactly(fh, 1))
3315 3317 result |= (byte & 0x7F) << shift
3316 3318 if not (byte & 0x80):
3317 3319 return result
3318 3320 shift += 7
3319 3321
3320 3322
3321 3323 # Passing the '' locale means that the locale should be set according to the
3322 3324 # user settings (environment variables).
3323 3325 # Python sometimes avoids setting the global locale settings. When interfacing
3324 3326 # with C code (e.g. the curses module or the Subversion bindings), the global
3325 3327 # locale settings must be initialized correctly. Python 2 does not initialize
3326 3328 # the global locale settings on interpreter startup. Python 3 sometimes
3327 3329 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3328 3330 # explicitly initialize it to get consistent behavior if it's not already
3329 3331 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3330 3332 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3331 3333 # if we can remove this code.
3332 3334 @contextlib.contextmanager
3333 3335 def with_lc_ctype():
3334 3336 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3335 3337 if oldloc == 'C':
3336 3338 try:
3337 3339 try:
3338 3340 locale.setlocale(locale.LC_CTYPE, '')
3339 3341 except locale.Error:
3340 3342 # The likely case is that the locale from the environment
3341 3343 # variables is unknown.
3342 3344 pass
3343 3345 yield
3344 3346 finally:
3345 3347 locale.setlocale(locale.LC_CTYPE, oldloc)
3346 3348 else:
3347 3349 yield
3348 3350
3349 3351
3350 3352 def _estimatememory():
3351 3353 # type: () -> Optional[int]
3352 3354 """Provide an estimate for the available system memory in Bytes.
3353 3355
3354 3356 If no estimate can be provided on the platform, returns None.
3355 3357 """
3356 3358 if pycompat.sysplatform.startswith(b'win'):
3357 3359 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3358 3360 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3359 3361 from ctypes.wintypes import ( # pytype: disable=import-error
3360 3362 Structure,
3361 3363 byref,
3362 3364 sizeof,
3363 3365 windll,
3364 3366 )
3365 3367
3366 3368 class MEMORYSTATUSEX(Structure):
3367 3369 _fields_ = [
3368 3370 ('dwLength', DWORD),
3369 3371 ('dwMemoryLoad', DWORD),
3370 3372 ('ullTotalPhys', DWORDLONG),
3371 3373 ('ullAvailPhys', DWORDLONG),
3372 3374 ('ullTotalPageFile', DWORDLONG),
3373 3375 ('ullAvailPageFile', DWORDLONG),
3374 3376 ('ullTotalVirtual', DWORDLONG),
3375 3377 ('ullAvailVirtual', DWORDLONG),
3376 3378 ('ullExtendedVirtual', DWORDLONG),
3377 3379 ]
3378 3380
3379 3381 x = MEMORYSTATUSEX()
3380 3382 x.dwLength = sizeof(x)
3381 3383 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3382 3384 return x.ullAvailPhys
3383 3385
3384 3386 # On newer Unix-like systems and Mac OSX, the sysconf interface
3385 3387 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3386 3388 # seems to be implemented on most systems.
3387 3389 try:
3388 3390 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3389 3391 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3390 3392 return pagesize * pages
3391 3393 except OSError: # sysconf can fail
3392 3394 pass
3393 3395 except KeyError: # unknown parameter
3394 3396 pass
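
# A sketch of how a caller might use the estimate (the threshold is
# illustrative):
#
#     mem = _estimatememory()
#     if mem is not None and mem < (1 << 30):
#         pass  # e.g. pick smaller buffers on low-memory systems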