util: avoid a leaked file descriptor in `util.makelock()` exceptional case
Matt Harbison
r52781:f833ad92 default
@@ -1,3404 +1,3406
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import annotations
17 17
18 18 import abc
19 19 import collections
20 20 import contextlib
21 21 import errno
22 22 import gc
23 23 import hashlib
24 24 import io
25 25 import itertools
26 26 import locale
27 27 import mmap
28 28 import os
29 29 import pickle # provides util.pickle symbol
30 30 import re as remod
31 31 import shutil
32 32 import stat
33 33 import sys
34 34 import time
35 35 import traceback
36 36 import typing
37 37 import warnings
38 38
39 39 from typing import (
40 40 Any,
41 41 BinaryIO,
42 42 Callable,
43 43 Iterable,
44 44 Iterator,
45 45 List,
46 46 Optional,
47 47 Tuple,
48 48 Type,
49 49 TypeVar,
50 50 )
51 51
52 52 from .node import hex
53 53 from .thirdparty import attr
54 54
55 55 # Force pytype to use the non-vendored package
56 56 if typing.TYPE_CHECKING:
57 57 # noinspection PyPackageRequirements
58 58 import attr
59 59
60 60 from .pycompat import (
61 61 open,
62 62 )
63 63 from hgdemandimport import tracing
64 64 from . import (
65 65 encoding,
66 66 error,
67 67 i18n,
68 68 policy,
69 69 pycompat,
70 70 typelib,
71 71 urllibcompat,
72 72 )
73 73 from .utils import (
74 74 compression,
75 75 hashutil,
76 76 procutil,
77 77 stringutil,
78 78 )
79 79
80 80 # keeps pyflakes happy
81 81 assert [
82 82 Iterable,
83 83 Iterator,
84 84 List,
85 85 Optional,
86 86 Tuple,
87 87 ]
88 88
89 89
90 90 base85 = policy.importmod('base85')
91 91 osutil = policy.importmod('osutil')
92 92
93 93 b85decode = base85.b85decode
94 94 b85encode = base85.b85encode
95 95
96 96 cookielib = pycompat.cookielib
97 97 httplib = pycompat.httplib
98 98 safehasattr = pycompat.safehasattr
99 99 socketserver = pycompat.socketserver
100 100 bytesio = io.BytesIO
101 101 # TODO deprecate stringio name, as it is a lie on Python 3.
102 102 stringio = bytesio
103 103 xmlrpclib = pycompat.xmlrpclib
104 104
105 105 httpserver = urllibcompat.httpserver
106 106 urlerr = urllibcompat.urlerr
107 107 urlreq = urllibcompat.urlreq
108 108
109 109 # workaround for win32mbcs
110 110 _filenamebytestr = pycompat.bytestr
111 111
112 112 if pycompat.iswindows:
113 113 from . import windows as platform
114 114 else:
115 115 from . import posix as platform
116 116
117 117 _ = i18n._
118 118
119 119 abspath = platform.abspath
120 120 bindunixsocket = platform.bindunixsocket
121 121 cachestat = platform.cachestat
122 122 checkexec = platform.checkexec
123 123 checklink = platform.checklink
124 124 copymode = platform.copymode
125 125 expandglobs = platform.expandglobs
126 126 getfsmountpoint = platform.getfsmountpoint
127 127 getfstype = platform.getfstype
128 128 get_password = platform.get_password
129 129 groupmembers = platform.groupmembers
130 130 groupname = platform.groupname
131 131 isexec = platform.isexec
132 132 isowner = platform.isowner
133 133 listdir = osutil.listdir
134 134 localpath = platform.localpath
135 135 lookupreg = platform.lookupreg
136 136 makedir = platform.makedir
137 137 nlinks = platform.nlinks
138 138 normpath = platform.normpath
139 139 normcase = platform.normcase
140 140 normcasespec = platform.normcasespec
141 141 normcasefallback = platform.normcasefallback
142 142 openhardlinks = platform.openhardlinks
143 143 oslink = platform.oslink
144 144 parsepatchoutput = platform.parsepatchoutput
145 145 pconvert = platform.pconvert
146 146 poll = platform.poll
147 147 posixfile = platform.posixfile
148 148 readlink = platform.readlink
149 149 rename = platform.rename
150 150 removedirs = platform.removedirs
151 151 samedevice = platform.samedevice
152 152 samefile = platform.samefile
153 153 samestat = platform.samestat
154 154 setflags = platform.setflags
155 155 split = platform.split
156 156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 157 statisexec = platform.statisexec
158 158 statislink = platform.statislink
159 159 umask = platform.umask
160 160 unlink = platform.unlink
161 161 username = platform.username
162 162
163 163
164 164 if typing.TYPE_CHECKING:
165 165 _Tfilestat = TypeVar('_Tfilestat', bound='filestat')
166 166
167 167
168 168 def setumask(val: int) -> None:
169 169 '''updates the umask. used by chg server'''
170 170 if pycompat.iswindows:
171 171 return
172 172 os.umask(val)
173 173 global umask
174 174 platform.umask = umask = val & 0o777
175 175
176 176
177 177 # small compat layer
178 178 compengines = compression.compengines
179 179 SERVERROLE = compression.SERVERROLE
180 180 CLIENTROLE = compression.CLIENTROLE
181 181
182 182 # Python compatibility
183 183
184 184 _notset = object()
185 185
186 186
187 187 def bitsfrom(container):
188 188 bits = 0
189 189 for bit in container:
190 190 bits |= bit
191 191 return bits
192 192
193 193
194 194 # python 2.6 still has deprecation warnings enabled by default. We do not want
195 195 # to display anything to the standard user, so detect if we are running tests
196 196 # and only use python deprecation warnings in this case.
197 197 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
198 198 if _dowarn:
199 199 # explicitly unfilter our warning for python 2.7
200 200 #
201 201 # The option of setting PYTHONWARNINGS in the test runner was investigated.
202 202 # However, a module name set through PYTHONWARNINGS was matched exactly, so
203 203 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
204 204 # makes the whole PYTHONWARNINGS approach useless for our use case.
205 205 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
206 206 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
207 207 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
208 208 if _dowarn:
209 209 # silence warning emitted by passing user string to re.sub()
210 210 warnings.filterwarnings(
211 211 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
212 212 )
213 213 warnings.filterwarnings(
214 214 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
215 215 )
216 216 # TODO: reinvent imp.is_frozen()
217 217 warnings.filterwarnings(
218 218 'ignore',
219 219 'the imp module is deprecated',
220 220 DeprecationWarning,
221 221 'mercurial',
222 222 )
223 223
224 224
225 225 def nouideprecwarn(msg, version, stacklevel=1):
226 226 """Issue an python native deprecation warning
227 227
228 228 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
229 229 """
230 230 if _dowarn:
231 231 msg += (
232 232 b"\n(compatibility will be dropped after Mercurial-%s,"
233 233 b" update your code.)"
234 234 ) % version
235 235 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
236 236 # on python 3 with chg, we will need to explicitly flush the output
237 237 sys.stderr.flush()
238 238
239 239
240 240 DIGESTS = {
241 241 b'md5': hashlib.md5,
242 242 b'sha1': hashutil.sha1,
243 243 b'sha512': hashlib.sha512,
244 244 }
245 245 # List of digest types from strongest to weakest
246 246 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
247 247
248 248 for k in DIGESTS_BY_STRENGTH:
249 249 assert k in DIGESTS
250 250
251 251
252 252 class digester:
253 253 """helper to compute digests.
254 254
255 255 This helper can be used to compute one or more digests given their name.
256 256
257 257 >>> d = digester([b'md5', b'sha1'])
258 258 >>> d.update(b'foo')
259 259 >>> [k for k in sorted(d)]
260 260 ['md5', 'sha1']
261 261 >>> d[b'md5']
262 262 'acbd18db4cc2f85cedef654fccc4a4d8'
263 263 >>> d[b'sha1']
264 264 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
265 265 >>> digester.preferred([b'md5', b'sha1'])
266 266 'sha1'
267 267 """
268 268
269 269 def __init__(self, digests, s=b''):
270 270 self._hashes = {}
271 271 for k in digests:
272 272 if k not in DIGESTS:
273 273 raise error.Abort(_(b'unknown digest type: %s') % k)
274 274 self._hashes[k] = DIGESTS[k]()
275 275 if s:
276 276 self.update(s)
277 277
278 278 def update(self, data):
279 279 for h in self._hashes.values():
280 280 h.update(data)
281 281
282 282 def __getitem__(self, key):
283 283 if key not in DIGESTS:
284 284 raise error.Abort(_(b'unknown digest type: %s') % key)
285 285 return hex(self._hashes[key].digest())
286 286
287 287 def __iter__(self):
288 288 return iter(self._hashes)
289 289
290 290 @staticmethod
291 291 def preferred(supported):
292 292 """returns the strongest digest type in both supported and DIGESTS."""
293 293
294 294 for k in DIGESTS_BY_STRENGTH:
295 295 if k in supported:
296 296 return k
297 297 return None
298 298
299 299
300 300 class digestchecker:
301 301 """file handle wrapper that additionally checks content against a given
302 302 size and digests.
303 303
304 304 d = digestchecker(fh, size, {'md5': '...'})
305 305
306 306 When multiple digests are given, all of them are validated.
307 307 """
308 308
309 309 def __init__(self, fh, size, digests):
310 310 self._fh = fh
311 311 self._size = size
312 312 self._got = 0
313 313 self._digests = dict(digests)
314 314 self._digester = digester(self._digests.keys())
315 315
316 316 def read(self, length=-1):
317 317 content = self._fh.read(length)
318 318 self._digester.update(content)
319 319 self._got += len(content)
320 320 return content
321 321
322 322 def validate(self):
323 323 if self._size != self._got:
324 324 raise error.Abort(
325 325 _(b'size mismatch: expected %d, got %d')
326 326 % (self._size, self._got)
327 327 )
328 328 for k, v in self._digests.items():
329 329 if v != self._digester[k]:
330 330 # i18n: first parameter is a digest name
331 331 raise error.Abort(
332 332 _(b'%s mismatch: expected %s, got %s')
333 333 % (k, v, self._digester[k])
334 334 )
335 335
336 336
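# Illustrative sketch (not part of this change): how a caller might drive
# digestchecker while reading data whose size and digest are known up front.
# The handle and the expected values are hypothetical placeholders.
def _digestchecker_example(fh, expected_size, expected_sha1):
    checker = digestchecker(fh, expected_size, {b'sha1': expected_sha1})
    while checker.read(4096):
        pass
    checker.validate()  # raises error.Abort on a size or digest mismatch

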
337 337 try:
338 338 buffer = buffer # pytype: disable=name-error
339 339 except NameError:
340 340
341 341 def buffer(sliceable, offset=0, length=None):
342 342 if length is not None:
343 343 view = memoryview(sliceable)[offset : offset + length]
344 344 else:
345 345 view = memoryview(sliceable)[offset:]
346 346 return view.toreadonly()
347 347
348 348
349 349 _chunksize = 4096
350 350
351 351
352 352 class bufferedinputpipe:
353 353 """a manually buffered input pipe
354 354
355 355 Python will not let us use buffered IO and lazy reading with 'polling' at
356 356 the same time. We cannot probe the buffer state and select will not detect
357 357 that data are ready to read if they are already buffered.
358 358
359 359 This class lets us work around that by implementing its own buffering
360 360 (allowing efficient readline) while offering a way to know if the buffer is
361 361 empty from the output (allowing collaboration of the buffer with polling).
362 362
363 363 This class lives in the 'util' module because it makes use of the 'os'
364 364 module from the python stdlib.
365 365 """
366 366
367 367 def __new__(cls, fh):
368 368 # If we receive a fileobjectproxy, we need to use a variation of this
369 369 # class that notifies observers about activity.
370 370 if isinstance(fh, fileobjectproxy):
371 371 cls = observedbufferedinputpipe
372 372
373 373 return super(bufferedinputpipe, cls).__new__(cls)
374 374
375 375 def __init__(self, input):
376 376 self._input = input
377 377 self._buffer = []
378 378 self._eof = False
379 379 self._lenbuf = 0
380 380
381 381 @property
382 382 def hasbuffer(self):
383 383 """True is any data is currently buffered
384 384
385 385 This will be used externally a pre-step for polling IO. If there is
386 386 already data then no polling should be set in place."""
387 387 return bool(self._buffer)
388 388
389 389 @property
390 390 def closed(self):
391 391 return self._input.closed
392 392
393 393 def fileno(self):
394 394 return self._input.fileno()
395 395
396 396 def close(self):
397 397 return self._input.close()
398 398
399 399 def read(self, size):
400 400 while (not self._eof) and (self._lenbuf < size):
401 401 self._fillbuffer()
402 402 return self._frombuffer(size)
403 403
404 404 def unbufferedread(self, size):
405 405 if not self._eof and self._lenbuf == 0:
406 406 self._fillbuffer(max(size, _chunksize))
407 407 return self._frombuffer(min(self._lenbuf, size))
408 408
409 409 def readline(self, *args, **kwargs):
410 410 if len(self._buffer) > 1:
411 411 # this should not happen because both read and readline end with a
412 412 # _frombuffer call that collapses it.
413 413 self._buffer = [b''.join(self._buffer)]
414 414 self._lenbuf = len(self._buffer[0])
415 415 lfi = -1
416 416 if self._buffer:
417 417 lfi = self._buffer[-1].find(b'\n')
418 418 while (not self._eof) and lfi < 0:
419 419 self._fillbuffer()
420 420 if self._buffer:
421 421 lfi = self._buffer[-1].find(b'\n')
422 422 size = lfi + 1
423 423 if lfi < 0: # end of file
424 424 size = self._lenbuf
425 425 elif len(self._buffer) > 1:
426 426 # we need to take previous chunks into account
427 427 size += self._lenbuf - len(self._buffer[-1])
428 428 return self._frombuffer(size)
429 429
430 430 def _frombuffer(self, size):
431 431 """return at most 'size' data from the buffer
432 432
433 433 The data are removed from the buffer."""
434 434 if size == 0 or not self._buffer:
435 435 return b''
436 436 buf = self._buffer[0]
437 437 if len(self._buffer) > 1:
438 438 buf = b''.join(self._buffer)
439 439
440 440 data = buf[:size]
441 441 buf = buf[len(data) :]
442 442 if buf:
443 443 self._buffer = [buf]
444 444 self._lenbuf = len(buf)
445 445 else:
446 446 self._buffer = []
447 447 self._lenbuf = 0
448 448 return data
449 449
450 450 def _fillbuffer(self, size=_chunksize):
451 451 """read data to the buffer"""
452 452 data = os.read(self._input.fileno(), size)
453 453 if not data:
454 454 self._eof = True
455 455 else:
456 456 self._lenbuf += len(data)
457 457 self._buffer.append(data)
458 458
459 459 return data
460 460
461 461
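# Illustrative sketch (not part of this change): pairing bufferedinputpipe
# with polling, as the class docstring describes. The helper name is
# hypothetical.
def _bufferedpipe_readline(fh):
    pipe = bufferedinputpipe(fh)
    # Only poll the file descriptor when nothing is buffered yet; data that
    # is already buffered would never wake the poll up.
    if not pipe.hasbuffer:
        poll([pipe.fileno()])
    return pipe.readline()

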
462 462 def has_mmap_populate():
463 463 return hasattr(osutil, "background_mmap_populate") or hasattr(
464 464 mmap, 'MAP_POPULATE'
465 465 )
466 466
467 467
468 468 def mmapread(fp, size=None, pre_populate=True):
469 469 """Read a file content using mmap
470 470
471 471 The responsability of checking the file system is mmap safe is the
472 472 responsability of the caller (see `vfs.is_mmap_safe`).
473 473
474 474 In some case, a normal string might be returned.
475 475
476 476 If `pre_populate` is True (the default), the mmapped data will be
477 477 pre-populated in memory if the system support this option, this slow down
478 478 the initial mmaping but avoid potentially crippling page fault on later
479 479 access. If this is not the desired behavior, set `pre_populate` to False.
480 480 """
481 481 if size == 0:
482 482 # size of 0 to mmap.mmap() means "all data"
483 483 # rather than "zero bytes", so special case that.
484 484 return b''
485 485 elif size is None:
486 486 size = 0
487 487 fd = getattr(fp, 'fileno', lambda: fp)()
488 488 flags = mmap.MAP_PRIVATE
489 489 bg_populate = hasattr(osutil, "background_mmap_populate")
490 490 if pre_populate and not bg_populate:
491 491 flags |= getattr(mmap, 'MAP_POPULATE', 0)
492 492 try:
493 493 m = mmap.mmap(fd, size, flags=flags, prot=mmap.PROT_READ)
494 494 if pre_populate and bg_populate:
495 495 osutil.background_mmap_populate(m)
496 496 return m
497 497 except ValueError:
498 498 # Empty files cannot be mmapped, but mmapread should still work. Check
499 499 # if the file is empty, and if so, return an empty buffer.
500 500 if os.fstat(fd).st_size == 0:
501 501 return b''
502 502 raise
503 503
504 504
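# Illustrative sketch (not part of this change): reading a file through
# mmapread. The path is a hypothetical placeholder; mmap duplicates the
# descriptor, so the context manager can close the file afterwards.
def _mmapread_example(path=b'some-file'):
    with open(path, 'rb') as fp:
        data = mmapread(fp, pre_populate=False)
    return len(data)  # data is a mmap object, or b'' for an empty file

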
505 505 class fileobjectproxy:
506 506 """A proxy around file objects that tells a watcher when events occur.
507 507
508 508 This type is intended to only be used for testing purposes. Think hard
509 509 before using it in important code.
510 510 """
511 511
512 512 __slots__ = (
513 513 '_orig',
514 514 '_observer',
515 515 )
516 516
517 517 def __init__(self, fh, observer):
518 518 object.__setattr__(self, '_orig', fh)
519 519 object.__setattr__(self, '_observer', observer)
520 520
521 521 def __getattribute__(self, name):
522 522 ours = {
523 523 '_observer',
524 524 # IOBase
525 525 'close',
526 526 # closed is a property
527 527 'fileno',
528 528 'flush',
529 529 'isatty',
530 530 'readable',
531 531 'readline',
532 532 'readlines',
533 533 'seek',
534 534 'seekable',
535 535 'tell',
536 536 'truncate',
537 537 'writable',
538 538 'writelines',
539 539 # RawIOBase
540 540 'read',
541 541 'readall',
542 542 'readinto',
543 543 'write',
544 544 # BufferedIOBase
545 545 # raw is a property
546 546 'detach',
547 547 # read defined above
548 548 'read1',
549 549 # readinto defined above
550 550 # write defined above
551 551 }
552 552
553 553 # We only observe some methods.
554 554 if name in ours:
555 555 return object.__getattribute__(self, name)
556 556
557 557 return getattr(object.__getattribute__(self, '_orig'), name)
558 558
559 559 def __nonzero__(self):
560 560 return bool(object.__getattribute__(self, '_orig'))
561 561
562 562 __bool__ = __nonzero__
563 563
564 564 def __delattr__(self, name):
565 565 return delattr(object.__getattribute__(self, '_orig'), name)
566 566
567 567 def __setattr__(self, name, value):
568 568 return setattr(object.__getattribute__(self, '_orig'), name, value)
569 569
570 570 def __iter__(self):
571 571 return object.__getattribute__(self, '_orig').__iter__()
572 572
573 573 def _observedcall(self, name, *args, **kwargs):
574 574 # Call the original object.
575 575 orig = object.__getattribute__(self, '_orig')
576 576 res = getattr(orig, name)(*args, **kwargs)
577 577
578 578 # Call a method on the observer of the same name with arguments
579 579 # so it can react, log, etc.
580 580 observer = object.__getattribute__(self, '_observer')
581 581 fn = getattr(observer, name, None)
582 582 if fn:
583 583 fn(res, *args, **kwargs)
584 584
585 585 return res
586 586
587 587 def close(self, *args, **kwargs):
588 588 return object.__getattribute__(self, '_observedcall')(
589 589 'close', *args, **kwargs
590 590 )
591 591
592 592 def fileno(self, *args, **kwargs):
593 593 return object.__getattribute__(self, '_observedcall')(
594 594 'fileno', *args, **kwargs
595 595 )
596 596
597 597 def flush(self, *args, **kwargs):
598 598 return object.__getattribute__(self, '_observedcall')(
599 599 'flush', *args, **kwargs
600 600 )
601 601
602 602 def isatty(self, *args, **kwargs):
603 603 return object.__getattribute__(self, '_observedcall')(
604 604 'isatty', *args, **kwargs
605 605 )
606 606
607 607 def readable(self, *args, **kwargs):
608 608 return object.__getattribute__(self, '_observedcall')(
609 609 'readable', *args, **kwargs
610 610 )
611 611
612 612 def readline(self, *args, **kwargs):
613 613 return object.__getattribute__(self, '_observedcall')(
614 614 'readline', *args, **kwargs
615 615 )
616 616
617 617 def readlines(self, *args, **kwargs):
618 618 return object.__getattribute__(self, '_observedcall')(
619 619 'readlines', *args, **kwargs
620 620 )
621 621
622 622 def seek(self, *args, **kwargs):
623 623 return object.__getattribute__(self, '_observedcall')(
624 624 'seek', *args, **kwargs
625 625 )
626 626
627 627 def seekable(self, *args, **kwargs):
628 628 return object.__getattribute__(self, '_observedcall')(
629 629 'seekable', *args, **kwargs
630 630 )
631 631
632 632 def tell(self, *args, **kwargs):
633 633 return object.__getattribute__(self, '_observedcall')(
634 634 'tell', *args, **kwargs
635 635 )
636 636
637 637 def truncate(self, *args, **kwargs):
638 638 return object.__getattribute__(self, '_observedcall')(
639 639 'truncate', *args, **kwargs
640 640 )
641 641
642 642 def writable(self, *args, **kwargs):
643 643 return object.__getattribute__(self, '_observedcall')(
644 644 'writable', *args, **kwargs
645 645 )
646 646
647 647 def writelines(self, *args, **kwargs):
648 648 return object.__getattribute__(self, '_observedcall')(
649 649 'writelines', *args, **kwargs
650 650 )
651 651
652 652 def read(self, *args, **kwargs):
653 653 return object.__getattribute__(self, '_observedcall')(
654 654 'read', *args, **kwargs
655 655 )
656 656
657 657 def readall(self, *args, **kwargs):
658 658 return object.__getattribute__(self, '_observedcall')(
659 659 'readall', *args, **kwargs
660 660 )
661 661
662 662 def readinto(self, *args, **kwargs):
663 663 return object.__getattribute__(self, '_observedcall')(
664 664 'readinto', *args, **kwargs
665 665 )
666 666
667 667 def write(self, *args, **kwargs):
668 668 return object.__getattribute__(self, '_observedcall')(
669 669 'write', *args, **kwargs
670 670 )
671 671
672 672 def detach(self, *args, **kwargs):
673 673 return object.__getattribute__(self, '_observedcall')(
674 674 'detach', *args, **kwargs
675 675 )
676 676
677 677 def read1(self, *args, **kwargs):
678 678 return object.__getattribute__(self, '_observedcall')(
679 679 'read1', *args, **kwargs
680 680 )
681 681
682 682
683 683 class observedbufferedinputpipe(bufferedinputpipe):
684 684 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
685 685
686 686 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
687 687 bypass ``fileobjectproxy``. Because of this, we need to make
688 688 ``bufferedinputpipe`` aware of these operations.
689 689
690 690 This variation of ``bufferedinputpipe`` can notify observers about
691 691 ``os.read()`` events. It also re-publishes other events, such as
692 692 ``read()`` and ``readline()``.
693 693 """
694 694
695 695 def _fillbuffer(self, size=_chunksize):
696 696 res = super(observedbufferedinputpipe, self)._fillbuffer(size=size)
697 697
698 698 fn = getattr(self._input._observer, 'osread', None)
699 699 if fn:
700 700 fn(res, size)
701 701
702 702 return res
703 703
704 704 # We use different observer methods because the operation isn't
705 705 # performed on the actual file object but on us.
706 706 def read(self, size):
707 707 res = super(observedbufferedinputpipe, self).read(size)
708 708
709 709 fn = getattr(self._input._observer, 'bufferedread', None)
710 710 if fn:
711 711 fn(res, size)
712 712
713 713 return res
714 714
715 715 def readline(self, *args, **kwargs):
716 716 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
717 717
718 718 fn = getattr(self._input._observer, 'bufferedreadline', None)
719 719 if fn:
720 720 fn(res)
721 721
722 722 return res
723 723
724 724
725 725 PROXIED_SOCKET_METHODS = {
726 726 'makefile',
727 727 'recv',
728 728 'recvfrom',
729 729 'recvfrom_into',
730 730 'recv_into',
731 731 'send',
732 732 'sendall',
733 733 'sendto',
734 734 'setblocking',
735 735 'settimeout',
736 736 'gettimeout',
737 737 'setsockopt',
738 738 }
739 739
740 740
741 741 class socketproxy:
742 742 """A proxy around a socket that tells a watcher when events occur.
743 743
744 744 This is like ``fileobjectproxy`` except for sockets.
745 745
746 746 This type is intended to only be used for testing purposes. Think hard
747 747 before using it in important code.
748 748 """
749 749
750 750 __slots__ = (
751 751 '_orig',
752 752 '_observer',
753 753 )
754 754
755 755 def __init__(self, sock, observer):
756 756 object.__setattr__(self, '_orig', sock)
757 757 object.__setattr__(self, '_observer', observer)
758 758
759 759 def __getattribute__(self, name):
760 760 if name in PROXIED_SOCKET_METHODS:
761 761 return object.__getattribute__(self, name)
762 762
763 763 return getattr(object.__getattribute__(self, '_orig'), name)
764 764
765 765 def __delattr__(self, name):
766 766 return delattr(object.__getattribute__(self, '_orig'), name)
767 767
768 768 def __setattr__(self, name, value):
769 769 return setattr(object.__getattribute__(self, '_orig'), name, value)
770 770
771 771 def __nonzero__(self):
772 772 return bool(object.__getattribute__(self, '_orig'))
773 773
774 774 __bool__ = __nonzero__
775 775
776 776 def _observedcall(self, name, *args, **kwargs):
777 777 # Call the original object.
778 778 orig = object.__getattribute__(self, '_orig')
779 779 res = getattr(orig, name)(*args, **kwargs)
780 780
781 781 # Call a method on the observer of the same name with arguments
782 782 # so it can react, log, etc.
783 783 observer = object.__getattribute__(self, '_observer')
784 784 fn = getattr(observer, name, None)
785 785 if fn:
786 786 fn(res, *args, **kwargs)
787 787
788 788 return res
789 789
790 790 def makefile(self, *args, **kwargs):
791 791 res = object.__getattribute__(self, '_observedcall')(
792 792 'makefile', *args, **kwargs
793 793 )
794 794
795 795 # The file object may be used for I/O. So we turn it into a
796 796 # proxy using our observer.
797 797 observer = object.__getattribute__(self, '_observer')
798 798 return makeloggingfileobject(
799 799 observer.fh,
800 800 res,
801 801 observer.name,
802 802 reads=observer.reads,
803 803 writes=observer.writes,
804 804 logdata=observer.logdata,
805 805 logdataapis=observer.logdataapis,
806 806 )
807 807
808 808 def recv(self, *args, **kwargs):
809 809 return object.__getattribute__(self, '_observedcall')(
810 810 'recv', *args, **kwargs
811 811 )
812 812
813 813 def recvfrom(self, *args, **kwargs):
814 814 return object.__getattribute__(self, '_observedcall')(
815 815 'recvfrom', *args, **kwargs
816 816 )
817 817
818 818 def recvfrom_into(self, *args, **kwargs):
819 819 return object.__getattribute__(self, '_observedcall')(
820 820 'recvfrom_into', *args, **kwargs
821 821 )
822 822
823 823 def recv_into(self, *args, **kwargs):
824 824 return object.__getattribute__(self, '_observedcall')(
825 825 'recv_into', *args, **kwargs
826 826 )
827 827
828 828 def send(self, *args, **kwargs):
829 829 return object.__getattribute__(self, '_observedcall')(
830 830 'send', *args, **kwargs
831 831 )
832 832
833 833 def sendall(self, *args, **kwargs):
834 834 return object.__getattribute__(self, '_observedcall')(
835 835 'sendall', *args, **kwargs
836 836 )
837 837
838 838 def sendto(self, *args, **kwargs):
839 839 return object.__getattribute__(self, '_observedcall')(
840 840 'sendto', *args, **kwargs
841 841 )
842 842
843 843 def setblocking(self, *args, **kwargs):
844 844 return object.__getattribute__(self, '_observedcall')(
845 845 'setblocking', *args, **kwargs
846 846 )
847 847
848 848 def settimeout(self, *args, **kwargs):
849 849 return object.__getattribute__(self, '_observedcall')(
850 850 'settimeout', *args, **kwargs
851 851 )
852 852
853 853 def gettimeout(self, *args, **kwargs):
854 854 return object.__getattribute__(self, '_observedcall')(
855 855 'gettimeout', *args, **kwargs
856 856 )
857 857
858 858 def setsockopt(self, *args, **kwargs):
859 859 return object.__getattribute__(self, '_observedcall')(
860 860 'setsockopt', *args, **kwargs
861 861 )
862 862
863 863
864 864 class baseproxyobserver:
865 865 def __init__(self, fh, name, logdata, logdataapis):
866 866 self.fh = fh
867 867 self.name = name
868 868 self.logdata = logdata
869 869 self.logdataapis = logdataapis
870 870
871 871 def _writedata(self, data):
872 872 if not self.logdata:
873 873 if self.logdataapis:
874 874 self.fh.write(b'\n')
875 875 self.fh.flush()
876 876 return
877 877
878 878 # Simple case writes all data on a single line.
879 879 if b'\n' not in data:
880 880 if self.logdataapis:
881 881 self.fh.write(b': %s\n' % stringutil.escapestr(data))
882 882 else:
883 883 self.fh.write(
884 884 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
885 885 )
886 886 self.fh.flush()
887 887 return
888 888
889 889 # Data with newlines is written to multiple lines.
890 890 if self.logdataapis:
891 891 self.fh.write(b':\n')
892 892
893 893 lines = data.splitlines(True)
894 894 for line in lines:
895 895 self.fh.write(
896 896 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
897 897 )
898 898 self.fh.flush()
899 899
900 900
901 901 class fileobjectobserver(baseproxyobserver):
902 902 """Logs file object activity."""
903 903
904 904 def __init__(
905 905 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
906 906 ):
907 907 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
908 908 self.reads = reads
909 909 self.writes = writes
910 910
911 911 def read(self, res, size=-1):
912 912 if not self.reads:
913 913 return
914 914 # Python 3 can return None from reads at EOF instead of empty strings.
915 915 if res is None:
916 916 res = b''
917 917
918 918 if size == -1 and res == b'':
919 919 # Suppress pointless read(-1) calls that return
920 920 # nothing. These happen _a lot_ on Python 3, and there
921 921 # doesn't seem to be a better workaround to have matching
922 922 # Python 2 and 3 behavior. :(
923 923 return
924 924
925 925 if self.logdataapis:
926 926 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
927 927
928 928 self._writedata(res)
929 929
930 930 def readline(self, res, limit=-1):
931 931 if not self.reads:
932 932 return
933 933
934 934 if self.logdataapis:
935 935 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
936 936
937 937 self._writedata(res)
938 938
939 939 def readinto(self, res, dest):
940 940 if not self.reads:
941 941 return
942 942
943 943 if self.logdataapis:
944 944 self.fh.write(
945 945 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
946 946 )
947 947
948 948 data = dest[0:res] if res is not None else b''
949 949
950 950 # _writedata() uses "in" operator and is confused by memoryview because
951 951 # characters are ints on Python 3.
952 952 if isinstance(data, memoryview):
953 953 data = data.tobytes()
954 954
955 955 self._writedata(data)
956 956
957 957 def write(self, res, data):
958 958 if not self.writes:
959 959 return
960 960
961 961 # Python 2 returns None from some write() calls. Python 3 (reasonably)
962 962 # returns the integer bytes written.
963 963 if res is None and data:
964 964 res = len(data)
965 965
966 966 if self.logdataapis:
967 967 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
968 968
969 969 self._writedata(data)
970 970
971 971 def flush(self, res):
972 972 if not self.writes:
973 973 return
974 974
975 975 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
976 976
977 977 # For observedbufferedinputpipe.
978 978 def bufferedread(self, res, size):
979 979 if not self.reads:
980 980 return
981 981
982 982 if self.logdataapis:
983 983 self.fh.write(
984 984 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
985 985 )
986 986
987 987 self._writedata(res)
988 988
989 989 def bufferedreadline(self, res):
990 990 if not self.reads:
991 991 return
992 992
993 993 if self.logdataapis:
994 994 self.fh.write(
995 995 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
996 996 )
997 997
998 998 self._writedata(res)
999 999
1000 1000
1001 1001 def makeloggingfileobject(
1002 1002 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
1003 1003 ):
1004 1004 """Turn a file object into a logging file object."""
1005 1005
1006 1006 observer = fileobjectobserver(
1007 1007 logh,
1008 1008 name,
1009 1009 reads=reads,
1010 1010 writes=writes,
1011 1011 logdata=logdata,
1012 1012 logdataapis=logdataapis,
1013 1013 )
1014 1014 return fileobjectproxy(fh, observer)
1015 1015
1016 1016
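# Illustrative sketch (not part of this change): wrapping a file object so
# its activity is logged, e.g. in test output. Both handles are hypothetical
# placeholders.
def _loggingfileobject_example(logfh, fh):
    proxied = makeloggingfileobject(logfh, fh, b'src', logdata=True)
    proxied.write(b'data')  # logfh receives "src> write(4) -> 4: data"
    proxied.flush()         # logfh receives "src> flush() -> None"
    return proxied

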
1017 1017 class socketobserver(baseproxyobserver):
1018 1018 """Logs socket activity."""
1019 1019
1020 1020 def __init__(
1021 1021 self,
1022 1022 fh,
1023 1023 name,
1024 1024 reads=True,
1025 1025 writes=True,
1026 1026 states=True,
1027 1027 logdata=False,
1028 1028 logdataapis=True,
1029 1029 ):
1030 1030 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
1031 1031 self.reads = reads
1032 1032 self.writes = writes
1033 1033 self.states = states
1034 1034
1035 1035 def makefile(self, res, mode=None, bufsize=None):
1036 1036 if not self.states:
1037 1037 return
1038 1038
1039 1039 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
1040 1040
1041 1041 def recv(self, res, size, flags=0):
1042 1042 if not self.reads:
1043 1043 return
1044 1044
1045 1045 if self.logdataapis:
1046 1046 self.fh.write(
1047 1047 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1048 1048 )
1049 1049 self._writedata(res)
1050 1050
1051 1051 def recvfrom(self, res, size, flags=0):
1052 1052 if not self.reads:
1053 1053 return
1054 1054
1055 1055 if self.logdataapis:
1056 1056 self.fh.write(
1057 1057 b'%s> recvfrom(%d, %d) -> %d'
1058 1058 % (self.name, size, flags, len(res[0]))
1059 1059 )
1060 1060
1061 1061 self._writedata(res[0])
1062 1062
1063 1063 def recvfrom_into(self, res, buf, size, flags=0):
1064 1064 if not self.reads:
1065 1065 return
1066 1066
1067 1067 if self.logdataapis:
1068 1068 self.fh.write(
1069 1069 b'%s> recvfrom_into(%d, %d) -> %d'
1070 1070 % (self.name, size, flags, res[0])
1071 1071 )
1072 1072
1073 1073 self._writedata(buf[0 : res[0]])
1074 1074
1075 1075 def recv_into(self, res, buf, size=0, flags=0):
1076 1076 if not self.reads:
1077 1077 return
1078 1078
1079 1079 if self.logdataapis:
1080 1080 self.fh.write(
1081 1081 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1082 1082 )
1083 1083
1084 1084 self._writedata(buf[0:res])
1085 1085
1086 1086 def send(self, res, data, flags=0):
1087 1087 if not self.writes:
1088 1088 return
1089 1089
1090 1090 self.fh.write(
1091 1091 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1092 1092 )
1093 1093 self._writedata(data)
1094 1094
1095 1095 def sendall(self, res, data, flags=0):
1096 1096 if not self.writes:
1097 1097 return
1098 1098
1099 1099 if self.logdataapis:
1100 1100 # Returns None on success. So don't bother reporting return value.
1101 1101 self.fh.write(
1102 1102 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1103 1103 )
1104 1104
1105 1105 self._writedata(data)
1106 1106
1107 1107 def sendto(self, res, data, flagsoraddress, address=None):
1108 1108 if not self.writes:
1109 1109 return
1110 1110
1111 1111 if address:
1112 1112 flags = flagsoraddress
1113 1113 else:
1114 1114 flags = 0
1115 1115
1116 1116 if self.logdataapis:
1117 1117 self.fh.write(
1118 1118 b'%s> sendto(%d, %d, %r) -> %d'
1119 1119 % (self.name, len(data), flags, address, res)
1120 1120 )
1121 1121
1122 1122 self._writedata(data)
1123 1123
1124 1124 def setblocking(self, res, flag):
1125 1125 if not self.states:
1126 1126 return
1127 1127
1128 1128 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1129 1129
1130 1130 def settimeout(self, res, value):
1131 1131 if not self.states:
1132 1132 return
1133 1133
1134 1134 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1135 1135
1136 1136 def gettimeout(self, res):
1137 1137 if not self.states:
1138 1138 return
1139 1139
1140 1140 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1141 1141
1142 1142 def setsockopt(self, res, level, optname, value):
1143 1143 if not self.states:
1144 1144 return
1145 1145
1146 1146 self.fh.write(
1147 1147 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1148 1148 % (self.name, level, optname, value, res)
1149 1149 )
1150 1150
1151 1151
1152 1152 def makeloggingsocket(
1153 1153 logh,
1154 1154 fh,
1155 1155 name,
1156 1156 reads=True,
1157 1157 writes=True,
1158 1158 states=True,
1159 1159 logdata=False,
1160 1160 logdataapis=True,
1161 1161 ):
1162 1162 """Turn a socket into a logging socket."""
1163 1163
1164 1164 observer = socketobserver(
1165 1165 logh,
1166 1166 name,
1167 1167 reads=reads,
1168 1168 writes=writes,
1169 1169 states=states,
1170 1170 logdata=logdata,
1171 1171 logdataapis=logdataapis,
1172 1172 )
1173 1173 return socketproxy(fh, observer)
1174 1174
1175 1175
1176 1176 def version():
1177 1177 """Return version information if available."""
1178 1178 try:
1179 1179 from . import __version__ # pytype: disable=import-error
1180 1180
1181 1181 return __version__.version
1182 1182 except ImportError:
1183 1183 return b'unknown'
1184 1184
1185 1185
1186 1186 def versiontuple(v=None, n=4):
1187 1187 """Parses a Mercurial version string into an N-tuple.
1188 1188
1189 1189 The version string to be parsed is specified with the ``v`` argument.
1190 1190 If it isn't defined, the current Mercurial version string will be parsed.
1191 1191
1192 1192 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1193 1193 returned values:
1194 1194
1195 1195 >>> v = b'3.6.1+190-df9b73d2d444'
1196 1196 >>> versiontuple(v, 2)
1197 1197 (3, 6)
1198 1198 >>> versiontuple(v, 3)
1199 1199 (3, 6, 1)
1200 1200 >>> versiontuple(v, 4)
1201 1201 (3, 6, 1, '190-df9b73d2d444')
1202 1202
1203 1203 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1204 1204 (3, 6, 1, '190-df9b73d2d444+20151118')
1205 1205
1206 1206 >>> v = b'3.6'
1207 1207 >>> versiontuple(v, 2)
1208 1208 (3, 6)
1209 1209 >>> versiontuple(v, 3)
1210 1210 (3, 6, None)
1211 1211 >>> versiontuple(v, 4)
1212 1212 (3, 6, None, None)
1213 1213
1214 1214 >>> v = b'3.9-rc'
1215 1215 >>> versiontuple(v, 2)
1216 1216 (3, 9)
1217 1217 >>> versiontuple(v, 3)
1218 1218 (3, 9, None)
1219 1219 >>> versiontuple(v, 4)
1220 1220 (3, 9, None, 'rc')
1221 1221
1222 1222 >>> v = b'3.9-rc+2-02a8fea4289b'
1223 1223 >>> versiontuple(v, 2)
1224 1224 (3, 9)
1225 1225 >>> versiontuple(v, 3)
1226 1226 (3, 9, None)
1227 1227 >>> versiontuple(v, 4)
1228 1228 (3, 9, None, 'rc+2-02a8fea4289b')
1229 1229
1230 1230 >>> versiontuple(b'4.6rc0')
1231 1231 (4, 6, None, 'rc0')
1232 1232 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1233 1233 (4, 6, None, 'rc0+12-425d55e54f98')
1234 1234 >>> versiontuple(b'.1.2.3')
1235 1235 (None, None, None, '.1.2.3')
1236 1236 >>> versiontuple(b'12.34..5')
1237 1237 (12, 34, None, '..5')
1238 1238 >>> versiontuple(b'1.2.3.4.5.6')
1239 1239 (1, 2, 3, '.4.5.6')
1240 1240 """
1241 1241 if not v:
1242 1242 v = version()
1243 1243 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1244 1244 if not m:
1245 1245 vparts, extra = b'', v
1246 1246 elif m.group(2):
1247 1247 vparts, extra = m.groups()
1248 1248 else:
1249 1249 vparts, extra = m.group(1), None
1250 1250
1251 1251 assert vparts is not None # help pytype
1252 1252
1253 1253 vints = []
1254 1254 for i in vparts.split(b'.'):
1255 1255 try:
1256 1256 vints.append(int(i))
1257 1257 except ValueError:
1258 1258 break
1259 1259 # (3, 6) -> (3, 6, None)
1260 1260 while len(vints) < 3:
1261 1261 vints.append(None)
1262 1262
1263 1263 if n == 2:
1264 1264 return (vints[0], vints[1])
1265 1265 if n == 3:
1266 1266 return (vints[0], vints[1], vints[2])
1267 1267 if n == 4:
1268 1268 return (vints[0], vints[1], vints[2], extra)
1269 1269
1270 1270 raise error.ProgrammingError(b"invalid version part request: %d" % n)
1271 1271
1272 1272
1273 1273 def cachefunc(func):
1274 1274 '''cache the result of function calls'''
1275 1275 # XXX doesn't handle keyword args
1276 1276 if func.__code__.co_argcount == 0:
1277 1277 listcache = []
1278 1278
1279 1279 def f():
1280 1280 if len(listcache) == 0:
1281 1281 listcache.append(func())
1282 1282 return listcache[0]
1283 1283
1284 1284 return f
1285 1285 cache = {}
1286 1286 if func.__code__.co_argcount == 1:
1287 1287 # we gain a small amount of time because
1288 1288 # we don't need to pack/unpack the list
1289 1289 def f(arg):
1290 1290 if arg not in cache:
1291 1291 cache[arg] = func(arg)
1292 1292 return cache[arg]
1293 1293
1294 1294 else:
1295 1295
1296 1296 def f(*args):
1297 1297 if args not in cache:
1298 1298 cache[args] = func(*args)
1299 1299 return cache[args]
1300 1300
1301 1301 return f
1302 1302
1303 1303
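# Illustrative sketch (not part of this change): cachefunc memoizes on the
# positional arguments, so the wrapped function runs once per distinct input.
def _cachefunc_example():
    calls = []

    @cachefunc
    def double(x):
        calls.append(x)
        return x * 2

    assert double(2) == 4
    assert double(2) == 4
    assert calls == [2]  # the second call was served from the cache

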
1304 1304 class cow:
1305 1305 """helper class to make copy-on-write easier
1306 1306
1307 1307 Call preparewrite before doing any writes.
1308 1308 """
1309 1309
1310 1310 def preparewrite(self):
1311 1311 """call this before writes, return self or a copied new object"""
1312 1312 if getattr(self, '_copied', 0):
1313 1313 self._copied -= 1
1314 1314 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1315 1315 return self.__class__(self) # pytype: disable=wrong-arg-count
1316 1316 return self
1317 1317
1318 1318 def copy(self):
1319 1319 """always do a cheap copy"""
1320 1320 self._copied = getattr(self, '_copied', 0) + 1
1321 1321 return self
1322 1322
1323 1323
1324 1324 class sortdict(collections.OrderedDict):
1325 1325 """a simple sorted dictionary
1326 1326
1327 1327 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1328 1328 >>> d2 = d1.copy()
1329 1329 >>> list(d2.items())
1330 1330 [('a', 0), ('b', 1)]
1331 1331 >>> d2.update([(b'a', 2)])
1332 1332 >>> list(d2.keys()) # should still be in last-set order
1333 1333 ['b', 'a']
1334 1334 >>> d1.insert(1, b'a.5', 0.5)
1335 1335 >>> list(d1.items())
1336 1336 [('a', 0), ('a.5', 0.5), ('b', 1)]
1337 1337 """
1338 1338
1339 1339 def __setitem__(self, key, value):
1340 1340 if key in self:
1341 1341 del self[key]
1342 1342 super(sortdict, self).__setitem__(key, value)
1343 1343
1344 1344 if pycompat.ispypy:
1345 1345 # __setitem__() isn't called as of PyPy 5.8.0
1346 1346 def update(self, src, **f):
1347 1347 if isinstance(src, dict):
1348 1348 src = src.items()
1349 1349 for k, v in src:
1350 1350 self[k] = v
1351 1351 for k in f:
1352 1352 self[k] = f[k]
1353 1353
1354 1354 def insert(self, position, key, value):
1355 1355 for i, (k, v) in enumerate(list(self.items())):
1356 1356 if i == position:
1357 1357 self[key] = value
1358 1358 if i >= position:
1359 1359 del self[k]
1360 1360 self[k] = v
1361 1361
1362 1362
1363 1363 class cowdict(cow, dict):
1364 1364 """copy-on-write dict
1365 1365
1366 1366 Be sure to call d = d.preparewrite() before writing to d.
1367 1367
1368 1368 >>> a = cowdict()
1369 1369 >>> a is a.preparewrite()
1370 1370 True
1371 1371 >>> b = a.copy()
1372 1372 >>> b is a
1373 1373 True
1374 1374 >>> c = b.copy()
1375 1375 >>> c is a
1376 1376 True
1377 1377 >>> a = a.preparewrite()
1378 1378 >>> b is a
1379 1379 False
1380 1380 >>> a is a.preparewrite()
1381 1381 True
1382 1382 >>> c = c.preparewrite()
1383 1383 >>> b is c
1384 1384 False
1385 1385 >>> b is b.preparewrite()
1386 1386 True
1387 1387 """
1388 1388
1389 1389
1390 1390 class cowsortdict(cow, sortdict):
1391 1391 """copy-on-write sortdict
1392 1392
1393 1393 Be sure to call d = d.preparewrite() before writing to d.
1394 1394 """
1395 1395
1396 1396
1397 1397 class transactional: # pytype: disable=ignored-metaclass
1398 1398 """Base class for making a transactional type into a context manager."""
1399 1399
1400 1400 __metaclass__ = abc.ABCMeta
1401 1401
1402 1402 @abc.abstractmethod
1403 1403 def close(self):
1404 1404 """Successfully closes the transaction."""
1405 1405
1406 1406 @abc.abstractmethod
1407 1407 def release(self):
1408 1408 """Marks the end of the transaction.
1409 1409
1410 1410 If the transaction has not been closed, it will be aborted.
1411 1411 """
1412 1412
1413 1413 def __enter__(self):
1414 1414 return self
1415 1415
1416 1416 def __exit__(self, exc_type, exc_val, exc_tb):
1417 1417 try:
1418 1418 if exc_type is None:
1419 1419 self.close()
1420 1420 finally:
1421 1421 self.release()
1422 1422
1423 1423
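# Illustrative sketch (not part of this change): a minimal transactional
# subclass. Leaving the with-block on an exception skips close(), so
# release() sees an unclosed transaction and aborts it.
class _demotransaction(transactional):
    def __init__(self):
        self.state = b'running'

    def close(self):
        self.state = b'committed'

    def release(self):
        if self.state != b'committed':
            self.state = b'aborted'

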
1424 1424 @contextlib.contextmanager
1425 1425 def acceptintervention(tr=None):
1426 1426 """A context manager that closes the transaction on InterventionRequired
1427 1427
1428 1428 If no transaction was provided, this simply runs the body and returns
1429 1429 """
1430 1430 if not tr:
1431 1431 yield
1432 1432 return
1433 1433 try:
1434 1434 yield
1435 1435 tr.close()
1436 1436 except error.InterventionRequired:
1437 1437 tr.close()
1438 1438 raise
1439 1439 finally:
1440 1440 tr.release()
1441 1441
1442 1442
1443 1443 @contextlib.contextmanager
1444 1444 def nullcontextmanager(enter_result=None):
1445 1445 yield enter_result
1446 1446
1447 1447
1448 1448 class _lrucachenode:
1449 1449 """A node in a doubly linked list.
1450 1450
1451 1451 Holds a reference to nodes on either side as well as a key-value
1452 1452 pair for the dictionary entry.
1453 1453 """
1454 1454
1455 1455 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1456 1456
1457 1457 def __init__(self):
1458 1458 self.next = self
1459 1459 self.prev = self
1460 1460
1461 1461 self.key = _notset
1462 1462 self.value = None
1463 1463 self.cost = 0
1464 1464
1465 1465 def markempty(self):
1466 1466 """Mark the node as emptied."""
1467 1467 self.key = _notset
1468 1468 self.value = None
1469 1469 self.cost = 0
1470 1470
1471 1471
1472 1472 class lrucachedict:
1473 1473 """Dict that caches most recent accesses and sets.
1474 1474
1475 1475 The dict consists of an actual backing dict - indexed by original
1476 1476 key - and a doubly linked circular list defining the order of entries in
1477 1477 the cache.
1478 1478
1479 1479 The head node is the newest entry in the cache. If the cache is full,
1480 1480 we recycle head.prev and make it the new head. Cache accesses result in
1481 1481 the node being moved to before the existing head and being marked as the
1482 1482 new head node.
1483 1483
1484 1484 Items in the cache can be inserted with an optional "cost" value. This is
1485 1485 simply an integer that is specified by the caller. The cache can be queried
1486 1486 for the total cost of all items presently in the cache.
1487 1487
1488 1488 The cache can also define a maximum cost. If a cache insertion would
1489 1489 cause the total cost of the cache to go beyond the maximum cost limit,
1490 1490 nodes will be evicted to make room for the new item. This can be used
1491 1491 to e.g. set a max memory limit and associate an estimated bytes size
1492 1492 cost to each item in the cache. By default, no maximum cost is enforced.
1493 1493 """
1494 1494
1495 1495 def __init__(self, max, maxcost=0):
1496 1496 self._cache = {}
1497 1497
1498 1498 self._head = _lrucachenode()
1499 1499 self._size = 1
1500 1500 self.capacity = max
1501 1501 self.totalcost = 0
1502 1502 self.maxcost = maxcost
1503 1503
1504 1504 def __len__(self):
1505 1505 return len(self._cache)
1506 1506
1507 1507 def __contains__(self, k):
1508 1508 return k in self._cache
1509 1509
1510 1510 def __iter__(self):
1511 1511 # We don't have to iterate in cache order, but why not.
1512 1512 n = self._head
1513 1513 for i in range(len(self._cache)):
1514 1514 yield n.key
1515 1515 n = n.next
1516 1516
1517 1517 def __getitem__(self, k):
1518 1518 node = self._cache[k]
1519 1519 self._movetohead(node)
1520 1520 return node.value
1521 1521
1522 1522 def insert(self, k, v, cost=0):
1523 1523 """Insert a new item in the cache with optional cost value."""
1524 1524 node = self._cache.get(k)
1525 1525 # Replace existing value and mark as newest.
1526 1526 if node is not None:
1527 1527 self.totalcost -= node.cost
1528 1528 node.value = v
1529 1529 node.cost = cost
1530 1530 self.totalcost += cost
1531 1531 self._movetohead(node)
1532 1532
1533 1533 if self.maxcost:
1534 1534 self._enforcecostlimit()
1535 1535
1536 1536 return
1537 1537
1538 1538 if self._size < self.capacity:
1539 1539 node = self._addcapacity()
1540 1540 else:
1541 1541 # Grab the last/oldest item.
1542 1542 node = self._head.prev
1543 1543
1544 1544 # At capacity. Kill the old entry.
1545 1545 if node.key is not _notset:
1546 1546 self.totalcost -= node.cost
1547 1547 del self._cache[node.key]
1548 1548
1549 1549 node.key = k
1550 1550 node.value = v
1551 1551 node.cost = cost
1552 1552 self.totalcost += cost
1553 1553 self._cache[k] = node
1554 1554 # And mark it as newest entry. No need to adjust order since it
1555 1555 # is already self._head.prev.
1556 1556 self._head = node
1557 1557
1558 1558 if self.maxcost:
1559 1559 self._enforcecostlimit()
1560 1560
1561 1561 def __setitem__(self, k, v):
1562 1562 self.insert(k, v)
1563 1563
1564 1564 def __delitem__(self, k):
1565 1565 self.pop(k)
1566 1566
1567 1567 def pop(self, k, default=_notset):
1568 1568 try:
1569 1569 node = self._cache.pop(k)
1570 1570 except KeyError:
1571 1571 if default is _notset:
1572 1572 raise
1573 1573 return default
1574 1574
1575 1575 value = node.value
1576 1576 self.totalcost -= node.cost
1577 1577 node.markempty()
1578 1578
1579 1579 # Temporarily mark as newest item before re-adjusting head to make
1580 1580 # this node the oldest item.
1581 1581 self._movetohead(node)
1582 1582 self._head = node.next
1583 1583
1584 1584 return value
1585 1585
1586 1586 # Additional dict methods.
1587 1587
1588 1588 def get(self, k, default=None):
1589 1589 try:
1590 1590 return self.__getitem__(k)
1591 1591 except KeyError:
1592 1592 return default
1593 1593
1594 1594 def peek(self, k, default=_notset):
1595 1595 """Get the specified item without moving it to the head
1596 1596
1597 1597 Unlike get(), this doesn't mutate the internal state. But be aware
1598 1598 that this doesn't mean peek() is thread safe.
1599 1599 """
1600 1600 try:
1601 1601 node = self._cache[k]
1602 1602 return node.value
1603 1603 except KeyError:
1604 1604 if default is _notset:
1605 1605 raise
1606 1606 return default
1607 1607
1608 1608 def clear(self):
1609 1609 n = self._head
1610 1610 while n.key is not _notset:
1611 1611 self.totalcost -= n.cost
1612 1612 n.markempty()
1613 1613 n = n.next
1614 1614
1615 1615 self._cache.clear()
1616 1616
1617 1617 def copy(self, capacity=None, maxcost=0):
1618 1618 """Create a new cache as a copy of the current one.
1619 1619
1620 1620 By default, the new cache has the same capacity as the existing one.
1621 1621 But, the cache capacity can be changed as part of performing the
1622 1622 copy.
1623 1623
1624 1624 Items in the copy have an insertion/access order matching this
1625 1625 instance.
1626 1626 """
1627 1627
1628 1628 capacity = capacity or self.capacity
1629 1629 maxcost = maxcost or self.maxcost
1630 1630 result = lrucachedict(capacity, maxcost=maxcost)
1631 1631
1632 1632 # We copy entries by iterating in oldest-to-newest order so the copy
1633 1633 # has the correct ordering.
1634 1634
1635 1635 # Find the first non-empty entry.
1636 1636 n = self._head.prev
1637 1637 while n.key is _notset and n is not self._head:
1638 1638 n = n.prev
1639 1639
1640 1640 # We could potentially skip the first N items when decreasing capacity.
1641 1641 # But let's keep it simple unless it is a performance problem.
1642 1642 for i in range(len(self._cache)):
1643 1643 result.insert(n.key, n.value, cost=n.cost)
1644 1644 n = n.prev
1645 1645
1646 1646 return result
1647 1647
1648 1648 def popoldest(self):
1649 1649 """Remove the oldest item from the cache.
1650 1650
1651 1651 Returns the (key, value) describing the removed cache entry.
1652 1652 """
1653 1653 if not self._cache:
1654 1654 return
1655 1655
1656 1656 # Walk the linked list backwards starting at tail node until we hit
1657 1657 # a non-empty node.
1658 1658 n = self._head.prev
1659 1659
1660 1660 while n.key is _notset:
1661 1661 n = n.prev
1662 1662
1663 1663 key, value = n.key, n.value
1664 1664
1665 1665 # And remove it from the cache and mark it as empty.
1666 1666 del self._cache[n.key]
1667 1667 self.totalcost -= n.cost
1668 1668 n.markempty()
1669 1669
1670 1670 return key, value
1671 1671
1672 1672 def _movetohead(self, node: _lrucachenode):
1673 1673 """Mark a node as the newest, making it the new head.
1674 1674
1675 1675 When a node is accessed, it becomes the freshest entry in the LRU
1676 1676 list, which is denoted by self._head.
1677 1677
1678 1678 Visually, let's make ``N`` the new head node (* denotes head):
1679 1679
1680 1680 previous/oldest <-> head <-> next/next newest
1681 1681
1682 1682 ----<->--- A* ---<->-----
1683 1683 | |
1684 1684 E <-> D <-> N <-> C <-> B
1685 1685
1686 1686 To:
1687 1687
1688 1688 ----<->--- N* ---<->-----
1689 1689 | |
1690 1690 E <-> D <-> C <-> B <-> A
1691 1691
1692 1692 This requires the following moves:
1693 1693
1694 1694 C.next = D (node.prev.next = node.next)
1695 1695 D.prev = C (node.next.prev = node.prev)
1696 1696 E.next = N (head.prev.next = node)
1697 1697 N.prev = E (node.prev = head.prev)
1698 1698 N.next = A (node.next = head)
1699 1699 A.prev = N (head.prev = node)
1700 1700 """
1701 1701 head = self._head
1702 1702 # C.next = D
1703 1703 node.prev.next = node.next
1704 1704 # D.prev = C
1705 1705 node.next.prev = node.prev
1706 1706 # N.prev = E
1707 1707 node.prev = head.prev
1708 1708 # N.next = A
1709 1709 # It is tempting to do just "head" here, however if node is
1710 1710 # adjacent to head, this will do bad things.
1711 1711 node.next = head.prev.next
1712 1712 # E.next = N
1713 1713 node.next.prev = node
1714 1714 # A.prev = N
1715 1715 node.prev.next = node
1716 1716
1717 1717 self._head = node
1718 1718
1719 1719 def _addcapacity(self) -> _lrucachenode:
1720 1720 """Add a node to the circular linked list.
1721 1721
1722 1722 The new node is inserted before the head node.
1723 1723 """
1724 1724 head = self._head
1725 1725 node = _lrucachenode()
1726 1726 head.prev.next = node
1727 1727 node.prev = head.prev
1728 1728 node.next = head
1729 1729 head.prev = node
1730 1730 self._size += 1
1731 1731 return node
1732 1732
1733 1733 def _enforcecostlimit(self):
1734 1734 # This should run after an insertion. It should only be called if total
1735 1735 # cost limits are being enforced.
1736 1736 # The most recently inserted node is never evicted.
1737 1737 if len(self) <= 1 or self.totalcost <= self.maxcost:
1738 1738 return
1739 1739
1740 1740 # This is logically equivalent to calling popoldest() until we
1741 1741 # free up enough cost. We don't do that since popoldest() needs
1742 1742 # to walk the linked list and doing this in a loop would be
1743 1743 # quadratic. So we find the first non-empty node and then
1744 1744 # walk nodes until we free up enough capacity.
1745 1745 #
1746 1746 # If we only removed the minimum number of nodes to free enough
1747 1747 # cost at insert time, chances are high that the next insert would
1748 1748 # also require pruning. This would effectively constitute quadratic
1749 1749 # behavior for insert-heavy workloads. To mitigate this, we set a
1750 1750 # target cost that is a percentage of the max cost. This will tend
1751 1751 # to free more nodes when the high water mark is reached, which
1752 1752 # lowers the chances of needing to prune on the subsequent insert.
1753 1753 targetcost = int(self.maxcost * 0.75)
1754 1754
1755 1755 n = self._head.prev
1756 1756 while n.key is _notset:
1757 1757 n = n.prev
1758 1758
1759 1759 while len(self) > 1 and self.totalcost > targetcost:
1760 1760 del self._cache[n.key]
1761 1761 self.totalcost -= n.cost
1762 1762 n.markempty()
1763 1763 n = n.prev
1764 1764
1765 1765
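# Illustrative sketch (not part of this change): basic lrucachedict usage.
def _lrucachedict_example():
    d = lrucachedict(2)
    d[b'a'] = 1
    d[b'b'] = 2
    d[b'a']  # accessing b'a' makes b'b' the oldest entry
    d[b'c'] = 3  # at capacity, so the oldest entry (b'b') is evicted
    return b'b' in d  # False

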
1766 1766 def lrucachefunc(func):
1767 1767 '''cache most recent results of function calls'''
1768 1768 cache = {}
1769 1769 order = collections.deque()
1770 1770 if func.__code__.co_argcount == 1:
1771 1771
1772 1772 def f(arg):
1773 1773 if arg not in cache:
1774 1774 if len(cache) > 20:
1775 1775 del cache[order.popleft()]
1776 1776 cache[arg] = func(arg)
1777 1777 else:
1778 1778 order.remove(arg)
1779 1779 order.append(arg)
1780 1780 return cache[arg]
1781 1781
1782 1782 else:
1783 1783
1784 1784 def f(*args):
1785 1785 if args not in cache:
1786 1786 if len(cache) > 20:
1787 1787 del cache[order.popleft()]
1788 1788 cache[args] = func(*args)
1789 1789 else:
1790 1790 order.remove(args)
1791 1791 order.append(args)
1792 1792 return cache[args]
1793 1793
1794 1794 return f
1795 1795
1796 1796
1797 1797 class propertycache:
1798 1798 def __init__(self, func):
1799 1799 self.func = func
1800 1800 self.name = func.__name__
1801 1801
1802 1802 def __get__(self, obj, type=None):
1803 1803 result = self.func(obj)
1804 1804 self.cachevalue(obj, result)
1805 1805 return result
1806 1806
1807 1807 def cachevalue(self, obj, value):
1808 1808 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1809 1809 obj.__dict__[self.name] = value
1810 1810
1811 1811
1812 1812 def clearcachedproperty(obj, prop):
1813 1813 '''clear a cached property value, if one has been set'''
1814 1814 prop = pycompat.sysstr(prop)
1815 1815 if prop in obj.__dict__:
1816 1816 del obj.__dict__[prop]
1817 1817
1818 1818
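# Illustrative sketch (not part of this change): propertycache computes a
# value once and stores it in the instance __dict__, bypassing the descriptor
# on later reads; clearcachedproperty forces recomputation.
class _demoobj:
    @propertycache
    def answer(self):
        return 42  # runs only on the first attribute access


def _propertycache_example():
    obj = _demoobj()
    obj.answer  # computed and cached in obj.__dict__
    clearcachedproperty(obj, b'answer')  # the next access recomputes

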
1819 1819 def increasingchunks(source, min=1024, max=65536):
1820 1820 """return no less than min bytes per chunk while data remains,
1821 1821 doubling min after each chunk until it reaches max"""
1822 1822
1823 1823 def log2(x):
1824 1824 if not x:
1825 1825 return 0
1826 1826 i = 0
1827 1827 while x:
1828 1828 x >>= 1
1829 1829 i += 1
1830 1830 return i - 1
1831 1831
1832 1832 buf = []
1833 1833 blen = 0
1834 1834 for chunk in source:
1835 1835 buf.append(chunk)
1836 1836 blen += len(chunk)
1837 1837 if blen >= min:
1838 1838 if min < max:
1839 1839 min = min << 1
1840 1840 nmin = 1 << log2(blen)
1841 1841 if nmin > min:
1842 1842 min = nmin
1843 1843 if min > max:
1844 1844 min = max
1845 1845 yield b''.join(buf)
1846 1846 blen = 0
1847 1847 buf = []
1848 1848 if buf:
1849 1849 yield b''.join(buf)
1850 1850
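# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): many small chunks are coalesced into progressively larger
# ones, which keeps per-chunk overhead low for consumers.
def _example_increasingchunks():
    small = (b'x' * 300 for _ in range(100))  # 30000 bytes, 300 at a time
    sizes = [len(c) for c in increasingchunks(small)]
    assert sum(sizes) == 30000  # no data is lost
    # chunk sizes roughly double until the data runs out
    assert sizes == [1200, 2100, 4200, 8400, 14100]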
1851 1851
1852 1852 def always(fn):
1853 1853 return True
1854 1854
1855 1855
1856 1856 def never(fn):
1857 1857 return False
1858 1858
1859 1859
1860 1860 def nogc(func=None) -> Any:
1861 1861 """disable garbage collector
1862 1862
1863 1863 Python's garbage collector triggers a GC each time a certain number of
1864 1864 container objects (the number being defined by gc.get_threshold()) are
1865 1865 allocated even when marked not to be tracked by the collector. Tracking has
1866 1866 no effect on when GCs are triggered, only on what objects the GC looks
1867 1867 into. As a workaround, disable GC while building complex (huge)
1868 1868 containers.
1869 1869
1870 1870 This garbage collector issue has been fixed in 2.7, but it still affects
1871 1871 CPython's performance.
1872 1872 """
1873 1873 if func is None:
1874 1874 return _nogc_context()
1875 1875 else:
1876 1876 return _nogc_decorator(func)
1877 1877
1878 1878
1879 1879 @contextlib.contextmanager
1880 1880 def _nogc_context():
1881 1881 gcenabled = gc.isenabled()
1882 1882 gc.disable()
1883 1883 try:
1884 1884 yield
1885 1885 finally:
1886 1886 if gcenabled:
1887 1887 gc.enable()
1888 1888
1889 1889
1890 1890 def _nogc_decorator(func):
1891 1891 def wrapper(*args, **kwargs):
1892 1892 with _nogc_context():
1893 1893 return func(*args, **kwargs)
1894 1894
1895 1895 return wrapper
1896 1896
1897 1897
1898 1898 if pycompat.ispypy:
1899 1899 # PyPy runs slower with gc disabled
1900 1900 nogc = lambda x: x
1901 1901
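# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical, and this assumes CPython, since on PyPy `nogc` is replaced
# by an identity function): `nogc` can wrap a function or guard a block
# while building large containers.
def _example_nogc():
    @nogc
    def build():
        return [{} for _ in range(100000)]

    with nogc():
        data = build()  # no GC pauses while allocating
    return data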
1902 1902
1903 1903 def pathto(root: bytes, n1: bytes, n2: bytes) -> bytes:
1904 1904 """return the relative path from one place to another.
1905 1905 root should use os.sep to separate directories
1906 1906 n1 should use os.sep to separate directories
1907 1907 n2 should use "/" to separate directories
1908 1908 returns an os.sep-separated path.
1909 1909
1910 1910 If n1 is a relative path, it's assumed it's
1911 1911 relative to root.
1912 1912 n2 should always be relative to root.
1913 1913 """
1914 1914 if not n1:
1915 1915 return localpath(n2)
1916 1916 if os.path.isabs(n1):
1917 1917 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1918 1918 return os.path.join(root, localpath(n2))
1919 1919 n2 = b'/'.join((pconvert(root), n2))
1920 1920 a, b = splitpath(n1), n2.split(b'/')
1921 1921 a.reverse()
1922 1922 b.reverse()
1923 1923 while a and b and a[-1] == b[-1]:
1924 1924 a.pop()
1925 1925 b.pop()
1926 1926 b.reverse()
1927 1927 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1928 1928
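# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical and assumes a POSIX os.sep): going from "foo/bar" (relative
# to /repo) to "baz/qux" means two levels up, then two levels down.
def _example_pathto():
    assert pathto(b'/repo', b'foo/bar', b'baz/qux') == b'../../baz/qux'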
1929 1929
1930 1930 def checksignature(func, depth=1):
1931 1931 '''wrap a function with code to check for calling errors'''
1932 1932
1933 1933 def check(*args, **kwargs):
1934 1934 try:
1935 1935 return func(*args, **kwargs)
1936 1936 except TypeError:
1937 1937 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1938 1938 raise error.SignatureError
1939 1939 raise
1940 1940
1941 1941 return check
1942 1942
1943 1943
1944 1944 # a whitelist of known filesystems where hardlinks work reliably
1945 1945 _hardlinkfswhitelist = {
1946 1946 b'apfs',
1947 1947 b'btrfs',
1948 1948 b'ext2',
1949 1949 b'ext3',
1950 1950 b'ext4',
1951 1951 b'hfs',
1952 1952 b'jfs',
1953 1953 b'NTFS',
1954 1954 b'reiserfs',
1955 1955 b'tmpfs',
1956 1956 b'ufs',
1957 1957 b'xfs',
1958 1958 b'zfs',
1959 1959 }
1960 1960
1961 1961
1962 1962 def copyfile(
1963 1963 src,
1964 1964 dest,
1965 1965 hardlink=False,
1966 1966 copystat=False,
1967 1967 checkambig=False,
1968 1968 nb_bytes=None,
1969 1969 no_hardlink_cb=None,
1970 1970 check_fs_hardlink=True,
1971 1971 ):
1972 1972 """copy a file, preserving mode and optionally other stat info like
1973 1973 atime/mtime
1974 1974
1975 1975 checkambig argument is used with filestat, and is useful only if
1976 1976 destination file is guarded by any lock (e.g. repo.lock or
1977 1977 repo.wlock).
1978 1978
1979 1979 copystat and checkambig should be exclusive.
1980 1980
1981 1981 nb_bytes: if set only copy the first `nb_bytes` of the source file.
1982 1982 """
1983 1983 assert not (copystat and checkambig)
1984 1984 oldstat = None
1985 1985 if os.path.lexists(dest):
1986 1986 if checkambig:
1987 1987 oldstat = checkambig and filestat.frompath(dest)
1988 1988 unlink(dest)
1989 1989 if hardlink and check_fs_hardlink:
1990 1990 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1991 1991 # unless we are confident that dest is on a whitelisted filesystem.
1992 1992 try:
1993 1993 fstype = getfstype(os.path.dirname(dest))
1994 1994 except OSError:
1995 1995 fstype = None
1996 1996 if fstype not in _hardlinkfswhitelist:
1997 1997 if no_hardlink_cb is not None:
1998 1998 no_hardlink_cb()
1999 1999 hardlink = False
2000 2000 if hardlink:
2001 2001 try:
2002 2002 oslink(src, dest)
2003 2003 if nb_bytes is not None:
2004 2004 m = "the `nb_bytes` argument is incompatible with `hardlink`"
2005 2005 raise error.ProgrammingError(m)
2006 2006 return
2007 2007 except (IOError, OSError) as exc:
2008 2008 if exc.errno != errno.EEXIST and no_hardlink_cb is not None:
2009 2009 no_hardlink_cb()
2010 2010 # fall back to normal copy
2011 2011 if os.path.islink(src):
2012 2012 os.symlink(os.readlink(src), dest)
2013 2013 # copystat is ignored for symlinks, but in general copystat isn't needed
2014 2014 # for them anyway
2015 2015 if nb_bytes is not None:
2016 2016 m = "cannot use `nb_bytes` on a symlink"
2017 2017 raise error.ProgrammingError(m)
2018 2018 else:
2019 2019 try:
2020 2020 shutil.copyfile(src, dest)
2021 2021 if copystat:
2022 2022 # copystat also copies mode
2023 2023 shutil.copystat(src, dest)
2024 2024 else:
2025 2025 shutil.copymode(src, dest)
2026 2026 if oldstat and oldstat.stat:
2027 2027 newstat = filestat.frompath(dest)
2028 2028 if newstat.isambig(oldstat):
2029 2029 # stat of copied file is ambiguous to original one
2030 2030 advanced = (
2031 2031 oldstat.stat[stat.ST_MTIME] + 1
2032 2032 ) & 0x7FFFFFFF
2033 2033 os.utime(dest, (advanced, advanced))
2034 2034 # We could do something smarter using a `copy_file_range` call or similar
2035 2035 if nb_bytes is not None:
2036 2036 with open(dest, mode='r+') as f:
2037 2037 f.truncate(nb_bytes)
2038 2038 except shutil.Error as inst:
2039 2039 raise error.Abort(stringutil.forcebytestr(inst))
2040 2040
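# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): requesting a hardlink with a callback that fires when the
# destination filesystem is not whitelisted or the link attempt fails.
def _example_copyfile(src, dest):
    fallbacks = []
    copyfile(
        src,
        dest,
        hardlink=True,
        no_hardlink_cb=lambda: fallbacks.append(True),
    )
    return bool(fallbacks)  # True if a plain copy was used instead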
2041 2041
2042 2042 def copyfiles(src, dst, hardlink=None, progress=None):
2043 2043 """Copy a directory tree using hardlinks if possible."""
2044 2044 num = 0
2045 2045
2046 2046 def settopic():
2047 2047 if progress:
2048 2048 progress.topic = _(b'linking') if hardlink else _(b'copying')
2049 2049
2050 2050 if os.path.isdir(src):
2051 2051 if hardlink is None:
2052 2052 hardlink = (
2053 2053 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
2054 2054 )
2055 2055 settopic()
2056 2056 os.mkdir(dst)
2057 2057 for name, kind in listdir(src):
2058 2058 srcname = os.path.join(src, name)
2059 2059 dstname = os.path.join(dst, name)
2060 2060 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
2061 2061 num += n
2062 2062 else:
2063 2063 if hardlink is None:
2064 2064 hardlink = (
2065 2065 os.stat(os.path.dirname(src)).st_dev
2066 2066 == os.stat(os.path.dirname(dst)).st_dev
2067 2067 )
2068 2068 settopic()
2069 2069
2070 2070 if hardlink:
2071 2071 try:
2072 2072 oslink(src, dst)
2073 2073 except (IOError, OSError) as exc:
2074 2074 if exc.errno != errno.EEXIST:
2075 2075 hardlink = False
2076 2076 # XXX maybe try to relink if the file exists?
2077 2077 shutil.copy(src, dst)
2078 2078 else:
2079 2079 shutil.copy(src, dst)
2080 2080 num += 1
2081 2081 if progress:
2082 2082 progress.increment()
2083 2083
2084 2084 return hardlink, num
2085 2085
2086 2086
2087 2087 _winreservednames = {
2088 2088 b'con',
2089 2089 b'prn',
2090 2090 b'aux',
2091 2091 b'nul',
2092 2092 b'com1',
2093 2093 b'com2',
2094 2094 b'com3',
2095 2095 b'com4',
2096 2096 b'com5',
2097 2097 b'com6',
2098 2098 b'com7',
2099 2099 b'com8',
2100 2100 b'com9',
2101 2101 b'lpt1',
2102 2102 b'lpt2',
2103 2103 b'lpt3',
2104 2104 b'lpt4',
2105 2105 b'lpt5',
2106 2106 b'lpt6',
2107 2107 b'lpt7',
2108 2108 b'lpt8',
2109 2109 b'lpt9',
2110 2110 }
2111 2111 _winreservedchars = b':*?"<>|'
2112 2112
2113 2113
2114 2114 def checkwinfilename(path: bytes) -> Optional[bytes]:
2115 2115 r"""Check that the base-relative path is a valid filename on Windows.
2116 2116 Returns None if the path is ok, or a UI string describing the problem.
2117 2117
2118 2118 >>> checkwinfilename(b"just/a/normal/path")
2119 2119 >>> checkwinfilename(b"foo/bar/con.xml")
2120 2120 "filename contains 'con', which is reserved on Windows"
2121 2121 >>> checkwinfilename(b"foo/con.xml/bar")
2122 2122 "filename contains 'con', which is reserved on Windows"
2123 2123 >>> checkwinfilename(b"foo/bar/xml.con")
2124 2124 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2125 2125 "filename contains 'AUX', which is reserved on Windows"
2126 2126 >>> checkwinfilename(b"foo/bar/bla:.txt")
2127 2127 "filename contains ':', which is reserved on Windows"
2128 2128 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2129 2129 "filename contains '\\x07', which is invalid on Windows"
2130 2130 >>> checkwinfilename(b"foo/bar/bla ")
2131 2131 "filename ends with ' ', which is not allowed on Windows"
2132 2132 >>> checkwinfilename(b"../bar")
2133 2133 >>> checkwinfilename(b"foo\\")
2134 2134 "filename ends with '\\', which is invalid on Windows"
2135 2135 >>> checkwinfilename(b"foo\\/bar")
2136 2136 "directory name ends with '\\', which is invalid on Windows"
2137 2137 """
2138 2138 if path.endswith(b'\\'):
2139 2139 return _(b"filename ends with '\\', which is invalid on Windows")
2140 2140 if b'\\/' in path:
2141 2141 return _(b"directory name ends with '\\', which is invalid on Windows")
2142 2142 for n in path.replace(b'\\', b'/').split(b'/'):
2143 2143 if not n:
2144 2144 continue
2145 2145 for c in _filenamebytestr(n):
2146 2146 if c in _winreservedchars:
2147 2147 return (
2148 2148 _(
2149 2149 b"filename contains '%s', which is reserved "
2150 2150 b"on Windows"
2151 2151 )
2152 2152 % c
2153 2153 )
2154 2154 if ord(c) <= 31:
2155 2155 return _(
2156 2156 b"filename contains '%s', which is invalid on Windows"
2157 2157 ) % stringutil.escapestr(c)
2158 2158 base = n.split(b'.')[0]
2159 2159 if base and base.lower() in _winreservednames:
2160 2160 return (
2161 2161 _(b"filename contains '%s', which is reserved on Windows")
2162 2162 % base
2163 2163 )
2164 2164 t = n[-1:]
2165 2165 if t in b'. ' and n not in b'..':
2166 2166 return (
2167 2167 _(
2168 2168 b"filename ends with '%s', which is not allowed "
2169 2169 b"on Windows"
2170 2170 )
2171 2171 % t
2172 2172 )
2173 2173
2174 2174
2175 2175 timer = getattr(time, "perf_counter", None)
2176 2176
2177 2177 if pycompat.iswindows:
2178 2178 checkosfilename = checkwinfilename
2179 2179 if not timer:
2180 2180 timer = time.clock # pytype: disable=module-attr
2181 2181 else:
2182 2182 # mercurial.windows doesn't have platform.checkosfilename
2183 2183 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2184 2184 if not timer:
2185 2185 timer = time.time
2186 2186
2187 2187
2188 2188 def makelock(info, pathname):
2189 2189 """Create a lock file atomically if possible
2190 2190
2191 2191 This may leave a stale lock file if symlink isn't supported and signal
2192 2192 interrupt is enabled.
2193 2193 """
2194 2194 try:
2195 2195 return os.symlink(info, pathname)
2196 2196 except OSError as why:
2197 2197 if why.errno == errno.EEXIST:
2198 2198 raise
2199 2199 except AttributeError: # no symlink in os
2200 2200 pass
2201 2201
2202 2202 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2203 2203 ld = os.open(pathname, flags)
2204 os.write(ld, info)
2205 os.close(ld)
2204 try:
2205 os.write(ld, info)
2206 finally:
2207 os.close(ld)
2206 2208
2207 2209
2208 2210 def readlock(pathname: bytes) -> bytes:
2209 2211 try:
2210 2212 return readlink(pathname)
2211 2213 except OSError as why:
2212 2214 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2213 2215 raise
2214 2216 except AttributeError: # no symlink in os
2215 2217 pass
2216 2218 with posixfile(pathname, b'rb') as fp:
2217 2219 return fp.read()
2218 2220
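# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): the lock holder's info round-trips through makelock() and
# readlock(), whether stored as a symlink target or as file contents.
def _example_locking(path=b'example.lock'):
    makelock(b'host:1234', path)
    try:
        assert readlock(path) == b'host:1234'
    finally:
        os.unlink(path)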
2219 2221
2220 2222 def fstat(fp):
2221 2223 '''stat file object that may not have fileno method.'''
2222 2224 try:
2223 2225 return os.fstat(fp.fileno())
2224 2226 except AttributeError:
2225 2227 return os.stat(fp.name)
2226 2228
2227 2229
2228 2230 # File system features
2229 2231
2230 2232
2231 2233 def fscasesensitive(path: bytes) -> bool:
2232 2234 """
2233 2235 Return true if the given path is on a case-sensitive filesystem
2234 2236
2235 2237 Requires a path (like /foo/.hg) ending with a foldable final
2236 2238 directory component.
2237 2239 """
2238 2240 s1 = os.lstat(path)
2239 2241 d, b = os.path.split(path)
2240 2242 b2 = b.upper()
2241 2243 if b == b2:
2242 2244 b2 = b.lower()
2243 2245 if b == b2:
2244 2246 return True # no evidence against case sensitivity
2245 2247 p2 = os.path.join(d, b2)
2246 2248 try:
2247 2249 s2 = os.lstat(p2)
2248 2250 if s2 == s1:
2249 2251 return False
2250 2252 return True
2251 2253 except OSError:
2252 2254 return True
2253 2255
2254 2256
2255 2257 _re2_input = lambda x: x
2256 2258 # google-re2 will need to be told to not output errors on its own
2257 2259 _re2_options = None
2258 2260 try:
2259 2261 import re2 # pytype: disable=import-error
2260 2262
2261 2263 _re2 = None
2262 2264 except ImportError:
2263 2265 _re2 = False
2264 2266
2265 2267
2266 2268 def has_re2():
2267 2269 """return True is re2 is available, False otherwise"""
2268 2270 if _re2 is None:
2269 2271 _re._checkre2()
2270 2272 return _re2
2271 2273
2272 2274
2273 2275 class _re:
2274 2276 @staticmethod
2275 2277 def _checkre2():
2276 2278 global _re2
2277 2279 global _re2_input
2278 2280 global _re2_options
2279 2281 if _re2 is not None:
2280 2282 # we already have the answer
2281 2283 return
2282 2284
2283 2285 check_pattern = br'\[([^\[]+)\]'
2284 2286 check_input = b'[ui]'
2285 2287 try:
2286 2288 # check if match works, see issue3964
2287 2289 _re2 = bool(re2.match(check_pattern, check_input))
2288 2290 except ImportError:
2289 2291 _re2 = False
2290 2292 except TypeError:
2291 2293 # the `pyre-2` project provides a re2 module that accepts bytes
2292 2294 # the `fb-re2` project provides a re2 module that accepts sysstr
2293 2295 check_pattern = pycompat.sysstr(check_pattern)
2294 2296 check_input = pycompat.sysstr(check_input)
2295 2297 _re2 = bool(re2.match(check_pattern, check_input))
2296 2298 _re2_input = pycompat.sysstr
2297 2299 try:
2298 2300 quiet = re2.Options()
2299 2301 quiet.log_errors = False
2300 2302 _re2_options = quiet
2301 2303 except AttributeError:
2302 2304 pass
2303 2305
2304 2306 def compile(self, pat, flags=0):
2305 2307 """Compile a regular expression, using re2 if possible
2306 2308
2307 2309 For best performance, use only re2-compatible regexp features. The
2308 2310 only flags from the re module that are re2-compatible are
2309 2311 IGNORECASE and MULTILINE."""
2310 2312 if _re2 is None:
2311 2313 self._checkre2()
2312 2314 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2313 2315 if flags & remod.IGNORECASE:
2314 2316 pat = b'(?i)' + pat
2315 2317 if flags & remod.MULTILINE:
2316 2318 pat = b'(?m)' + pat
2317 2319 try:
2318 2320 input_regex = _re2_input(pat)
2319 2321 if _re2_options is not None:
2320 2322 compiled = re2.compile(input_regex, options=_re2_options)
2321 2323 else:
2322 2324 compiled = re2.compile(input_regex)
2323 2325 return compiled
2324 2326 except re2.error:
2325 2327 pass
2326 2328 return remod.compile(pat, flags)
2327 2329
2328 2330 @propertycache
2329 2331 def escape(self):
2330 2332 """Return the version of escape corresponding to self.compile.
2331 2333
2332 2334 This is imperfect because whether re2 or re is used for a particular
2333 2335 function depends on the flags, etc, but it's the best we can do.
2334 2336 """
2335 2337 global _re2
2336 2338 if _re2 is None:
2337 2339 self._checkre2()
2338 2340 if _re2:
2339 2341 return re2.escape
2340 2342 else:
2341 2343 return remod.escape
2342 2344
2343 2345
2344 2346 re = _re()
2345 2347
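# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): util's `re.compile` transparently uses re2 when available
# and silently falls back to the stdlib `re` module otherwise.
def _example_re_compile():
    pat = re.compile(b'^[a-z]+$', remod.IGNORECASE)
    assert pat.match(b'Hello')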
2346 2348 _fspathcache = {}
2347 2349
2348 2350
2349 2351 def fspath(name: bytes, root: bytes) -> bytes:
2350 2352 """Get name in the case stored in the filesystem
2351 2353
2352 2354 The name should be relative to root, and be normcase-ed for efficiency.
2353 2355
2354 2356 Note that this function is unnecessary, and should not be
2355 2357 called, for case-sensitive filesystems (simply because it's expensive).
2356 2358
2357 2359 The root should be normcase-ed, too.
2358 2360 """
2359 2361
2360 2362 def _makefspathcacheentry(dir):
2361 2363 return {normcase(n): n for n in os.listdir(dir)}
2362 2364
2363 2365 seps = pycompat.ossep
2364 2366 if pycompat.osaltsep:
2365 2367 seps = seps + pycompat.osaltsep
2366 2368 # Protect backslashes. This gets silly very quickly.
2367 2369 seps = seps.replace(b'\\', b'\\\\')
2368 2370 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2369 2371 dir = os.path.normpath(root)
2370 2372 result = []
2371 2373 for part, sep in pattern.findall(name):
2372 2374 if sep:
2373 2375 result.append(sep)
2374 2376 continue
2375 2377
2376 2378 if dir not in _fspathcache:
2377 2379 _fspathcache[dir] = _makefspathcacheentry(dir)
2378 2380 contents = _fspathcache[dir]
2379 2381
2380 2382 found = contents.get(part)
2381 2383 if not found:
2382 2384 # retry "once per directory" per "dirstate.walk" which
2383 2385 # may take place for each patch of "hg qpush", for example
2384 2386 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2385 2387 found = contents.get(part)
2386 2388
2387 2389 result.append(found or part)
2388 2390 dir = os.path.join(dir, part)
2389 2391
2390 2392 return b''.join(result)
2391 2393
2392 2394
2393 2395 def checknlink(testfile: bytes) -> bool:
2394 2396 '''check whether hardlink count reporting works properly'''
2395 2397
2396 2398 # testfile may be open, so we need a separate file for checking to
2397 2399 # work around issue2543 (or testfile may get lost on Samba shares)
2398 2400 f1, f2, fp = None, None, None
2399 2401 try:
2400 2402 fd, f1 = pycompat.mkstemp(
2401 2403 prefix=b'.%s-' % os.path.basename(testfile),
2402 2404 suffix=b'1~',
2403 2405 dir=os.path.dirname(testfile),
2404 2406 )
2405 2407 os.close(fd)
2406 2408 f2 = b'%s2~' % f1[:-2]
2407 2409
2408 2410 oslink(f1, f2)
2409 2411 # nlinks() may behave differently for files on Windows shares if
2410 2412 # the file is open.
2411 2413 fp = posixfile(f2)
2412 2414 return nlinks(f2) > 1
2413 2415 except OSError:
2414 2416 return False
2415 2417 finally:
2416 2418 if fp is not None:
2417 2419 fp.close()
2418 2420 for f in (f1, f2):
2419 2421 try:
2420 2422 if f is not None:
2421 2423 os.unlink(f)
2422 2424 except OSError:
2423 2425 pass
2424 2426
2425 2427
2426 2428 def endswithsep(path: bytes) -> bool:
2427 2429 '''Check path ends with os.sep or os.altsep.'''
2428 2430 return bool( # help pytype
2429 2431 path.endswith(pycompat.ossep)
2430 2432 or pycompat.osaltsep
2431 2433 and path.endswith(pycompat.osaltsep)
2432 2434 )
2433 2435
2434 2436
2435 2437 def splitpath(path: bytes) -> List[bytes]:
2436 2438 """Split path by os.sep.
2437 2439 Note that this function does not use os.altsep because it is
2438 2440 an alternative to a simple "xxx.split(os.sep)".
2439 2441 It is recommended to use os.path.normpath() before using this
2440 2442 function if needed."""
2441 2443 return path.split(pycompat.ossep)
2442 2444
2443 2445
2444 2446 def mktempcopy(
2445 2447 name: bytes,
2446 2448 emptyok: bool = False,
2447 2449 createmode: Optional[int] = None,
2448 2450 enforcewritable: bool = False,
2449 2451 ) -> bytes:
2450 2452 """Create a temporary file with the same contents from name
2451 2453
2452 2454 The permission bits are copied from the original file.
2453 2455
2454 2456 If the temporary file is going to be truncated immediately, you
2455 2457 can use emptyok=True as an optimization.
2456 2458
2457 2459 Returns the name of the temporary file.
2458 2460 """
2459 2461 d, fn = os.path.split(name)
2460 2462 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2461 2463 os.close(fd)
2462 2464 # Temporary files are created with mode 0600, which is usually not
2463 2465 # what we want. If the original file already exists, just copy
2464 2466 # its mode. Otherwise, manually obey umask.
2465 2467 copymode(name, temp, createmode, enforcewritable)
2466 2468
2467 2469 if emptyok:
2468 2470 return temp
2469 2471 try:
2470 2472 try:
2471 2473 ifp = posixfile(name, b"rb")
2472 2474 except IOError as inst:
2473 2475 if inst.errno == errno.ENOENT:
2474 2476 return temp
2475 2477 if not getattr(inst, 'filename', None):
2476 2478 inst.filename = name
2477 2479 raise
2478 2480 ofp = posixfile(temp, b"wb")
2479 2481 for chunk in filechunkiter(ifp):
2480 2482 ofp.write(chunk)
2481 2483 ifp.close()
2482 2484 ofp.close()
2483 2485 except: # re-raises
2484 2486 try:
2485 2487 os.unlink(temp)
2486 2488 except OSError:
2487 2489 pass
2488 2490 raise
2489 2491 return temp
2490 2492
2491 2493
2492 2494 class filestat:
2493 2495 """help to exactly detect change of a file
2494 2496
2495 2497 'stat' attribute is result of 'os.stat()' if specified 'path'
2496 2498 exists. Otherwise, it is None. This avoids a preparatory
2497 2499 'exists()' check on the caller's side of this class.
2498 2500 """
2499 2501
2500 2502 def __init__(self, stat: Optional[os.stat_result]) -> None:
2501 2503 self.stat = stat
2502 2504
2503 2505 @classmethod
2504 2506 def frompath(cls: Type[_Tfilestat], path: bytes) -> _Tfilestat:
2505 2507 try:
2506 2508 stat = os.stat(path)
2507 2509 except FileNotFoundError:
2508 2510 stat = None
2509 2511 return cls(stat)
2510 2512
2511 2513 @classmethod
2512 2514 def fromfp(cls: Type[_Tfilestat], fp: BinaryIO) -> _Tfilestat:
2513 2515 stat = os.fstat(fp.fileno())
2514 2516 return cls(stat)
2515 2517
2516 2518 __hash__ = object.__hash__
2517 2519
2518 2520 def __eq__(self, old) -> bool:
2519 2521 try:
2520 2522 # if ambiguity between stat of new and old file is
2521 2523 # avoided, comparison of size, ctime and mtime is enough
2522 2524 # to exactly detect change of a file regardless of platform
2523 2525 return (
2524 2526 self.stat.st_size == old.stat.st_size
2525 2527 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2526 2528 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2527 2529 )
2528 2530 except AttributeError:
2529 2531 pass
2530 2532 try:
2531 2533 return self.stat is None and old.stat is None
2532 2534 except AttributeError:
2533 2535 return False
2534 2536
2535 2537 def isambig(self, old: _Tfilestat) -> bool:
2536 2538 """Examine whether new (= self) stat is ambiguous against old one
2537 2539
2538 2540 "S[N]" below means stat of a file at N-th change:
2539 2541
2540 2542 - S[n-1].ctime < S[n].ctime: can detect change of a file
2541 2543 - S[n-1].ctime == S[n].ctime
2542 2544 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2543 2545 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2544 2546 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2545 2547 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2546 2548
2547 2549 Case (*2) above means that a file was changed twice or more
2548 2550 within the same second (= S[n-1].ctime), so comparison of
2549 2551 timestamps is ambiguous.
2550 2552
2551 2553 The base idea to avoid such ambiguity is "advance mtime by 1
2552 2554 second, if the timestamp is ambiguous".
2553 2555
2554 2556 But advancing mtime only in case (*2) doesn't work as
2555 2557 expected, because naturally advanced S[n].mtime in case (*1)
2556 2558 might be equal to manually advanced S[n-1 or earlier].mtime.
2557 2559
2558 2560 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2559 2561 treated as ambiguous regardless of mtime, to avoid overlooking
2560 2562 changes masked by colliding mtimes.
2561 2563
2562 2564 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2563 2565 S[n].mtime", even if size of a file isn't changed.
2564 2566 """
2565 2567 try:
2566 2568 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2567 2569 except AttributeError:
2568 2570 return False
2569 2571
2570 2572 def avoidambig(self, path: bytes, old: _Tfilestat) -> bool:
2571 2573 """Change file stat of specified path to avoid ambiguity
2572 2574
2573 2575 'old' should be previous filestat of 'path'.
2574 2576
2575 2577 This skips avoiding ambiguity, if a process doesn't have
2576 2578 appropriate privileges for 'path'. This returns False in this
2577 2579 case.
2578 2580
2579 2581 Otherwise, this returns True, as "ambiguity is avoided".
2580 2582 """
2581 2583 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2582 2584 try:
2583 2585 os.utime(path, (advanced, advanced))
2584 2586 except PermissionError:
2585 2587 # utime() on the file created by another user causes EPERM,
2586 2588 # if a process doesn't have appropriate privileges
2587 2589 return False
2588 2590 return True
2589 2591
2590 2592 def __ne__(self, other) -> bool:
2591 2593 return not self == other
2592 2594
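# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical, and it uses writefile(), which is defined further down in
# this module): comparing stats of the same path across a rewrite and
# nudging mtime forward when the change would otherwise be undetectable.
def _example_filestat(path):
    old = filestat.frompath(path)
    writefile(path, b'new content')  # rewrite within the same second
    new = filestat.frompath(path)
    if new.isambig(old):
        new.avoidambig(path, old)  # advance mtime by one second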
2593 2595
2594 2596 class atomictempfile:
2595 2597 """writable file object that atomically updates a file
2596 2598
2597 2599 All writes will go to a temporary copy of the original file. Call
2598 2600 close() when you are done writing, and atomictempfile will rename
2599 2601 the temporary copy to the original name, making the changes
2600 2602 visible. If the object is destroyed without being closed, all your
2601 2603 writes are discarded.
2602 2604
2603 2605 checkambig argument of constructor is used with filestat, and is
2604 2606 useful only if target file is guarded by any lock (e.g. repo.lock
2605 2607 or repo.wlock).
2606 2608 """
2607 2609
2608 2610 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2609 2611 self.__name = name # permanent name
2610 2612 self._tempname = mktempcopy(
2611 2613 name,
2612 2614 emptyok=(b'w' in mode),
2613 2615 createmode=createmode,
2614 2616 enforcewritable=(b'w' in mode),
2615 2617 )
2616 2618
2617 2619 self._fp = posixfile(self._tempname, mode)
2618 2620 self._checkambig = checkambig
2619 2621
2620 2622 # delegated methods
2621 2623 self.read = self._fp.read
2622 2624 self.write = self._fp.write
2623 2625 self.writelines = self._fp.writelines
2624 2626 self.seek = self._fp.seek
2625 2627 self.tell = self._fp.tell
2626 2628 self.fileno = self._fp.fileno
2627 2629
2628 2630 def close(self):
2629 2631 if not self._fp.closed:
2630 2632 self._fp.close()
2631 2633 filename = localpath(self.__name)
2632 2634 oldstat = self._checkambig and filestat.frompath(filename)
2633 2635 if oldstat and oldstat.stat:
2634 2636 rename(self._tempname, filename)
2635 2637 newstat = filestat.frompath(filename)
2636 2638 if newstat.isambig(oldstat):
2637 2639 # stat of changed file is ambiguous to original one
2638 2640 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2639 2641 os.utime(filename, (advanced, advanced))
2640 2642 else:
2641 2643 rename(self._tempname, filename)
2642 2644
2643 2645 def discard(self):
2644 2646 if not self._fp.closed:
2645 2647 try:
2646 2648 os.unlink(self._tempname)
2647 2649 except OSError:
2648 2650 pass
2649 2651 self._fp.close()
2650 2652
2651 2653 def __del__(self):
2652 2654 if hasattr(self, '_fp'): # constructor actually did something
2653 2655 self.discard()
2654 2656
2655 2657 def __enter__(self):
2656 2658 return self
2657 2659
2658 2660 def __exit__(self, exctype, excvalue, traceback):
2659 2661 if exctype is not None:
2660 2662 self.discard()
2661 2663 else:
2662 2664 self.close()
2663 2665
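# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): writes become visible atomically when the context exits
# cleanly; raising inside the block discards them instead.
def _example_atomictempfile(path):
    with atomictempfile(path, b'wb') as fp:
        fp.write(b'all or nothing\n')
    # on a clean exit the temporary file has replaced `path`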
2664 2666
2665 2667 def tryrmdir(f):
2666 2668 try:
2667 2669 removedirs(f)
2668 2670 except OSError as e:
2669 2671 if e.errno != errno.ENOENT and e.errno != errno.ENOTEMPTY:
2670 2672 raise
2671 2673
2672 2674
2673 2675 def unlinkpath(
2674 2676 f: bytes, ignoremissing: bool = False, rmdir: bool = True
2675 2677 ) -> None:
2676 2678 """unlink and remove the directory if it is empty"""
2677 2679 if ignoremissing:
2678 2680 tryunlink(f)
2679 2681 else:
2680 2682 unlink(f)
2681 2683 if rmdir:
2682 2684 # try removing directories that might now be empty
2683 2685 try:
2684 2686 removedirs(os.path.dirname(f))
2685 2687 except OSError:
2686 2688 pass
2687 2689
2688 2690
2689 2691 def tryunlink(f: bytes) -> bool:
2690 2692 """Attempt to remove a file, ignoring FileNotFoundError.
2691 2693
2692 2694 Returns False in case the file did not exist, True otherwise
2693 2695 """
2694 2696 try:
2695 2697 unlink(f)
2696 2698 return True
2697 2699 except FileNotFoundError:
2698 2700 return False
2699 2701
2700 2702
2701 2703 def makedirs(
2702 2704 name: bytes, mode: Optional[int] = None, notindexed: bool = False
2703 2705 ) -> None:
2704 2706 """recursive directory creation with parent mode inheritance
2705 2707
2706 2708 Newly created directories are marked as "not to be indexed by
2707 2709 the content indexing service", if ``notindexed`` is specified
2708 2710 for "write" mode access.
2709 2711 """
2710 2712 try:
2711 2713 makedir(name, notindexed)
2712 2714 except OSError as err:
2713 2715 if err.errno == errno.EEXIST:
2714 2716 return
2715 2717 if err.errno != errno.ENOENT or not name:
2716 2718 raise
2717 2719 parent = os.path.dirname(abspath(name))
2718 2720 if parent == name:
2719 2721 raise
2720 2722 makedirs(parent, mode, notindexed)
2721 2723 try:
2722 2724 makedir(name, notindexed)
2723 2725 except OSError as err:
2724 2726 # Catch EEXIST to handle races
2725 2727 if err.errno == errno.EEXIST:
2726 2728 return
2727 2729 raise
2728 2730 if mode is not None:
2729 2731 os.chmod(name, mode)
2730 2732
2731 2733
2732 2734 def readfile(path: bytes) -> bytes:
2733 2735 with open(path, b'rb') as fp:
2734 2736 return fp.read()
2735 2737
2736 2738
2737 2739 def writefile(path: bytes, text: bytes) -> None:
2738 2740 with open(path, b'wb') as fp:
2739 2741 fp.write(text)
2740 2742
2741 2743
2742 2744 def appendfile(path: bytes, text: bytes) -> None:
2743 2745 with open(path, b'ab') as fp:
2744 2746 fp.write(text)
2745 2747
2746 2748
2747 2749 class chunkbuffer:
2748 2750 """Allow arbitrary sized chunks of data to be efficiently read from an
2749 2751 iterator over chunks of arbitrary size."""
2750 2752
2751 2753 def __init__(self, in_iter):
2752 2754 """in_iter is the iterator that's iterating over the input chunks."""
2753 2755
2754 2756 def splitbig(chunks):
2755 2757 for chunk in chunks:
2756 2758 if len(chunk) > 2**20:
2757 2759 pos = 0
2758 2760 while pos < len(chunk):
2759 2761 end = pos + 2**18
2760 2762 yield chunk[pos:end]
2761 2763 pos = end
2762 2764 else:
2763 2765 yield chunk
2764 2766
2765 2767 self.iter = splitbig(in_iter)
2766 2768 self._queue = collections.deque()
2767 2769 self._chunkoffset = 0
2768 2770
2769 2771 def read(self, l=None):
2770 2772 """Read L bytes of data from the iterator of chunks of data.
2771 2773 Returns less than L bytes if the iterator runs dry.
2772 2774
2773 2775 If size parameter is omitted, read everything"""
2774 2776 if l is None:
2775 2777 return b''.join(self.iter)
2776 2778
2777 2779 left = l
2778 2780 buf = []
2779 2781 queue = self._queue
2780 2782 while left > 0:
2781 2783 # refill the queue
2782 2784 if not queue:
2783 2785 target = 2**18
2784 2786 for chunk in self.iter:
2785 2787 queue.append(chunk)
2786 2788 target -= len(chunk)
2787 2789 if target <= 0:
2788 2790 break
2789 2791 if not queue:
2790 2792 break
2791 2793
2792 2794 # The easy way to do this would be to queue.popleft(), modify the
2793 2795 # chunk (if necessary), then queue.appendleft(). However, for cases
2794 2796 # where we read partial chunk content, this incurs 2 dequeue
2795 2797 # mutations and creates a new str for the remaining chunk in the
2796 2798 # queue. Our code below avoids this overhead.
2797 2799
2798 2800 chunk = queue[0]
2799 2801 chunkl = len(chunk)
2800 2802 offset = self._chunkoffset
2801 2803
2802 2804 # Use full chunk.
2803 2805 if offset == 0 and left >= chunkl:
2804 2806 left -= chunkl
2805 2807 queue.popleft()
2806 2808 buf.append(chunk)
2807 2809 # self._chunkoffset remains at 0.
2808 2810 continue
2809 2811
2810 2812 chunkremaining = chunkl - offset
2811 2813
2812 2814 # Use all of unconsumed part of chunk.
2813 2815 if left >= chunkremaining:
2814 2816 left -= chunkremaining
2815 2817 queue.popleft()
2816 2818 # offset == 0 is enabled by block above, so this won't merely
2817 2819 # copy via ``chunk[0:]``.
2818 2820 buf.append(chunk[offset:])
2819 2821 self._chunkoffset = 0
2820 2822
2821 2823 # Partial chunk needed.
2822 2824 else:
2823 2825 buf.append(chunk[offset : offset + left])
2824 2826 self._chunkoffset += left
2825 2827 left -= chunkremaining
2826 2828
2827 2829 return b''.join(buf)
2828 2830
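# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): fixed-size reads are satisfied across chunk boundaries.
def _example_chunkbuffer():
    buf = chunkbuffer(iter([b'ab', b'cdef', b'g']))
    assert buf.read(3) == b'abc'
    assert buf.read(3) == b'def'
    assert buf.read(3) == b'g'  # short read once the iterator runs dry
    assert buf.read(3) == b''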
2829 2831
2830 2832 def filechunkiter(f, size=131072, limit=None):
2831 2833 """Create a generator that produces the data in the file size
2832 2834 (default 131072) bytes at a time, up to optional limit (default is
2833 2835 to read all data). Chunks may be less than size bytes if the
2834 2836 chunk is the last chunk in the file, or the file is a socket or
2835 2837 some other type of file that sometimes reads less data than is
2836 2838 requested."""
2837 2839 assert size >= 0
2838 2840 assert limit is None or limit >= 0
2839 2841 while True:
2840 2842 if limit is None:
2841 2843 nbytes = size
2842 2844 else:
2843 2845 nbytes = min(limit, size)
2844 2846 s = nbytes and f.read(nbytes)
2845 2847 if not s:
2846 2848 break
2847 2849 if limit:
2848 2850 limit -= len(s)
2849 2851 yield s
2850 2852
2851 2853
2852 2854 class cappedreader:
2853 2855 """A file object proxy that allows reading up to N bytes.
2854 2856
2855 2857 Given a source file object, instances of this type allow reading up to
2856 2858 N bytes from that source file object. Attempts to read past the allowed
2857 2859 limit are treated as EOF.
2858 2860
2859 2861 It is assumed that I/O is not performed on the original file object
2860 2862 in addition to I/O that is performed by this instance. If there is,
2861 2863 state tracking will get out of sync and unexpected results will ensue.
2862 2864 """
2863 2865
2864 2866 def __init__(self, fh, limit):
2865 2867 """Allow reading up to <limit> bytes from <fh>."""
2866 2868 self._fh = fh
2867 2869 self._left = limit
2868 2870
2869 2871 def read(self, n=-1):
2870 2872 if not self._left:
2871 2873 return b''
2872 2874
2873 2875 if n < 0:
2874 2876 n = self._left
2875 2877
2876 2878 data = self._fh.read(min(n, self._left))
2877 2879 self._left -= len(data)
2878 2880 assert self._left >= 0
2879 2881
2880 2882 return data
2881 2883
2882 2884 def readinto(self, b):
2883 2885 res = self.read(len(b))
2884 2886 if res is None:
2885 2887 return None
2886 2888
2887 2889 b[0 : len(res)] = res
2888 2890 return len(res)
2889 2891
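# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): reads beyond the cap behave like EOF.
def _example_cappedreader():
    capped = cappedreader(io.BytesIO(b'0123456789'), 4)
    assert capped.read() == b'0123'
    assert capped.read() == b''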
2890 2892
2891 2893 def unitcountfn(*unittable):
2892 2894 '''return a function that renders a readable count of some quantity'''
2893 2895
2894 2896 def go(count):
2895 2897 for multiplier, divisor, format in unittable:
2896 2898 if abs(count) >= divisor * multiplier:
2897 2899 return format % (count / float(divisor))
2898 2900 return unittable[-1][2] % count
2899 2901
2900 2902 return go
2901 2903
2902 2904
2903 2905 def processlinerange(fromline: int, toline: int) -> Tuple[int, int]:
2904 2906 """Check that linerange <fromline>:<toline> makes sense and return a
2905 2907 0-based range.
2906 2908
2907 2909 >>> processlinerange(10, 20)
2908 2910 (9, 20)
2909 2911 >>> processlinerange(2, 1)
2910 2912 Traceback (most recent call last):
2911 2913 ...
2912 2914 ParseError: line range must be positive
2913 2915 >>> processlinerange(0, 5)
2914 2916 Traceback (most recent call last):
2915 2917 ...
2916 2918 ParseError: fromline must be strictly positive
2917 2919 """
2918 2920 if toline - fromline < 0:
2919 2921 raise error.ParseError(_(b"line range must be positive"))
2920 2922 if fromline < 1:
2921 2923 raise error.ParseError(_(b"fromline must be strictly positive"))
2922 2924 return fromline - 1, toline
2923 2925
2924 2926
2925 2927 bytecount = unitcountfn(
2926 2928 (100, 1 << 30, _(b'%.0f GB')),
2927 2929 (10, 1 << 30, _(b'%.1f GB')),
2928 2930 (1, 1 << 30, _(b'%.2f GB')),
2929 2931 (100, 1 << 20, _(b'%.0f MB')),
2930 2932 (10, 1 << 20, _(b'%.1f MB')),
2931 2933 (1, 1 << 20, _(b'%.2f MB')),
2932 2934 (100, 1 << 10, _(b'%.0f KB')),
2933 2935 (10, 1 << 10, _(b'%.1f KB')),
2934 2936 (1, 1 << 10, _(b'%.2f KB')),
2935 2937 (1, 1, _(b'%.0f bytes')),
2936 2938 )
2937 2939
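# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical, and the expected strings assume untranslated messages):
# the table is scanned top-down and the first matching unit/precision
# pair wins.
def _example_bytecount():
    assert bytecount(4096) == b'4.00 KB'
    assert bytecount(150 * (1 << 20)) == b'150 MB'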
2938 2940
2939 2941 class transformingwriter(typelib.BinaryIO_Proxy):
2940 2942 """Writable file wrapper to transform data by function"""
2941 2943
2942 2944 def __init__(self, fp: BinaryIO, encode: Callable[[bytes], bytes]) -> None:
2943 2945 self._fp = fp
2944 2946 self._encode = encode
2945 2947
2946 2948 def close(self) -> None:
2947 2949 self._fp.close()
2948 2950
2949 2951 def flush(self) -> None:
2950 2952 self._fp.flush()
2951 2953
2952 2954 def write(self, data: bytes) -> int:
2953 2955 return self._fp.write(self._encode(data))
2954 2956
2955 2957
2956 2958 # Matches a single EOL which can either be a CRLF where repeated CR
2957 2959 # are removed or a LF. We do not care about old Macintosh files, so a
2958 2960 # stray CR is an error.
2959 2961 _eolre = remod.compile(br'\r*\n')
2960 2962
2961 2963
2962 2964 def tolf(s: bytes) -> bytes:
2963 2965 return _eolre.sub(b'\n', s)
2964 2966
2965 2967
2966 2968 def tocrlf(s: bytes) -> bytes:
2967 2969 return _eolre.sub(b'\r\n', s)
2968 2970
2969 2971
2970 2972 def _crlfwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
2971 2973 return transformingwriter(fp, tocrlf)
2972 2974
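# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): runs of CRs before a LF collapse to a single EOL in
# either direction.
def _example_eol():
    assert tolf(b'a\r\r\nb\n') == b'a\nb\n'
    assert tocrlf(b'a\nb\n') == b'a\r\nb\r\n'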
2973 2975
2974 2976 if pycompat.oslinesep == b'\r\n':
2975 2977 tonativeeol = tocrlf
2976 2978 fromnativeeol = tolf
2977 2979 nativeeolwriter = _crlfwriter
2978 2980 else:
2979 2981 tonativeeol = pycompat.identity
2980 2982 fromnativeeol = pycompat.identity
2981 2983 nativeeolwriter = pycompat.identity
2982 2984
2983 2985 if typing.TYPE_CHECKING:
2984 2986 # Replace the various overloads that come along with aliasing other methods
2985 2987 # with the narrow definition that we care about in the type checking phase
2986 2988 # only. This ensures that both Windows and POSIX see only the definition
2987 2989 # that is actually available.
2988 2990
2989 2991 def tonativeeol(s: bytes) -> bytes:
2990 2992 raise NotImplementedError
2991 2993
2992 2994 def fromnativeeol(s: bytes) -> bytes:
2993 2995 raise NotImplementedError
2994 2996
2995 2997 def nativeeolwriter(fp: typelib.BinaryIO_Proxy) -> typelib.BinaryIO_Proxy:
2996 2998 raise NotImplementedError
2997 2999
2998 3000
2999 3001 # TODO delete since the workaround variant for Python 2 is no longer needed.
3000 3002 def iterfile(fp):
3001 3003 return fp
3002 3004
3003 3005
3004 3006 def iterlines(iterator: Iterable[bytes]) -> Iterator[bytes]:
3005 3007 for chunk in iterator:
3006 3008 for line in chunk.splitlines():
3007 3009 yield line
3008 3010
3009 3011
3010 3012 def expandpath(path: bytes) -> bytes:
3011 3013 return os.path.expanduser(os.path.expandvars(path))
3012 3014
3013 3015
3014 3016 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
3015 3017 """Return the result of interpolating items in the mapping into string s.
3016 3018
3017 3019 prefix is a single character string, or a two character string with
3018 3020 a backslash as the first character if the prefix needs to be escaped in
3019 3021 a regular expression.
3020 3022
3021 3023 fn is an optional function that will be applied to the replacement text
3022 3024 just before replacement.
3023 3025
3024 3026 escape_prefix is an optional flag that allows using doubled prefix for
3025 3027 its escaping.
3026 3028 """
3027 3029 fn = fn or (lambda s: s)
3028 3030 patterns = b'|'.join(mapping.keys())
3029 3031 if escape_prefix:
3030 3032 patterns += b'|' + prefix
3031 3033 if len(prefix) > 1:
3032 3034 prefix_char = prefix[1:]
3033 3035 else:
3034 3036 prefix_char = prefix
3035 3037 mapping[prefix_char] = prefix_char
3036 3038 r = remod.compile(br'%s(%s)' % (prefix, patterns))
3037 3039 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3038 3040
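# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): `$` must be passed pre-escaped, since it is a regular
# expression metacharacter.
def _example_interpolate():
    assert interpolate(br'\$', {b'user': b'alice'}, b'hi $user') == b'hi alice'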
3039 3041
3040 3042 timecount = unitcountfn(
3041 3043 (1, 1e3, _(b'%.0f s')),
3042 3044 (100, 1, _(b'%.1f s')),
3043 3045 (10, 1, _(b'%.2f s')),
3044 3046 (1, 1, _(b'%.3f s')),
3045 3047 (100, 0.001, _(b'%.1f ms')),
3046 3048 (10, 0.001, _(b'%.2f ms')),
3047 3049 (1, 0.001, _(b'%.3f ms')),
3048 3050 (100, 0.000001, _(b'%.1f us')),
3049 3051 (10, 0.000001, _(b'%.2f us')),
3050 3052 (1, 0.000001, _(b'%.3f us')),
3051 3053 (100, 0.000000001, _(b'%.1f ns')),
3052 3054 (10, 0.000000001, _(b'%.2f ns')),
3053 3055 (1, 0.000000001, _(b'%.3f ns')),
3054 3056 )
3055 3057
3056 3058
3057 3059 @attr.s
3058 3060 class timedcmstats:
3059 3061 """Stats information produced by the timedcm context manager on entering."""
3060 3062
3061 3063 # the starting value of the timer as a float (meaning and resolution are
3062 3064 # platform dependent, see util.timer)
3063 3065 start = attr.ib(default=attr.Factory(lambda: timer()))
3064 3066 # the number of seconds as a floating point value; starts at 0, updated when
3065 3067 # the context is exited.
3066 3068 elapsed = attr.ib(default=0)
3067 3069 # the number of nested timedcm context managers.
3068 3070 level = attr.ib(default=1)
3069 3071
3070 3072 def __bytes__(self):
3071 3073 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
3072 3074
3073 3075 __str__ = encoding.strmethod(__bytes__)
3074 3076
3075 3077
3076 3078 @contextlib.contextmanager
3077 3079 def timedcm(whencefmt, *whenceargs):
3078 3080 """A context manager that produces timing information for a given context.
3079 3081
3080 3082 On entering a timedcmstats instance is produced.
3081 3083
3082 3084 This context manager is reentrant.
3083 3085
3084 3086 """
3085 3087 # track nested context managers
3086 3088 timedcm._nested += 1
3087 3089 timing_stats = timedcmstats(level=timedcm._nested)
3088 3090 try:
3089 3091 with tracing.log(whencefmt, *whenceargs):
3090 3092 yield timing_stats
3091 3093 finally:
3092 3094 timing_stats.elapsed = timer() - timing_stats.start
3093 3095 timedcm._nested -= 1
3094 3096
3095 3097
3096 3098 timedcm._nested = 0
3097 3099
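# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): the yielded stats object is filled in when the context
# exits.
def _example_timedcm():
    with timedcm(b'example-%s', b'sleep') as stats:
        time.sleep(0.01)
    assert stats.elapsed > 0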
3098 3100
3099 3101 def timed(func):
3100 3102 """Report the execution time of a function call to stderr.
3101 3103
3102 3104 During development, use as a decorator when you need to measure
3103 3105 the cost of a function, e.g. as follows:
3104 3106
3105 3107 @util.timed
3106 3108 def foo(a, b, c):
3107 3109 pass
3108 3110 """
3109 3111
3110 3112 def wrapper(*args, **kwargs):
3111 3113 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3112 3114 result = func(*args, **kwargs)
3113 3115 stderr = procutil.stderr
3114 3116 stderr.write(
3115 3117 b'%s%s: %s\n'
3116 3118 % (
3117 3119 b' ' * time_stats.level * 2,
3118 3120 pycompat.bytestr(func.__name__),
3119 3121 time_stats,
3120 3122 )
3121 3123 )
3122 3124 return result
3123 3125
3124 3126 return wrapper
3125 3127
3126 3128
3127 3129 _sizeunits = (
3128 3130 (b'm', 2**20),
3129 3131 (b'k', 2**10),
3130 3132 (b'g', 2**30),
3131 3133 (b'kb', 2**10),
3132 3134 (b'mb', 2**20),
3133 3135 (b'gb', 2**30),
3134 3136 (b'b', 1),
3135 3137 )
3136 3138
3137 3139
3138 3140 def sizetoint(s: bytes) -> int:
3139 3141 """Convert a space specifier to a byte count.
3140 3142
3141 3143 >>> sizetoint(b'30')
3142 3144 30
3143 3145 >>> sizetoint(b'2.2kb')
3144 3146 2252
3145 3147 >>> sizetoint(b'6M')
3146 3148 6291456
3147 3149 """
3148 3150 t = s.strip().lower()
3149 3151 try:
3150 3152 for k, u in _sizeunits:
3151 3153 if t.endswith(k):
3152 3154 return int(float(t[: -len(k)]) * u)
3153 3155 return int(t)
3154 3156 except ValueError:
3155 3157 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3156 3158
3157 3159
3158 3160 class hooks:
3159 3161 """A collection of hook functions that can be used to extend a
3160 3162 function's behavior. Hooks are called in lexicographic order,
3161 3163 based on the names of their sources."""
3162 3164
3163 3165 def __init__(self):
3164 3166 self._hooks = []
3165 3167
3166 3168 def add(self, source, hook):
3167 3169 self._hooks.append((source, hook))
3168 3170
3169 3171 def __call__(self, *args):
3170 3172 self._hooks.sort(key=lambda x: x[0])
3171 3173 results = []
3172 3174 for source, hook in self._hooks:
3173 3175 results.append(hook(*args))
3174 3176 return results
3175 3177
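# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): hooks run sorted by source name, not registration order.
def _example_hooks():
    h = hooks()
    h.add(b'zzz', lambda: b'late')
    h.add(b'aaa', lambda: b'early')
    assert h() == [b'early', b'late']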
3176 3178
3177 3179 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3178 3180 """Yields lines for a nicely formatted stacktrace.
3179 3181 Skips the 'skip' last entries, then returns the last 'depth' entries.
3180 3182 Each file+linenumber is formatted according to fileline.
3181 3183 Each line is formatted according to line.
3182 3184 If line is None, it yields:
3183 3185 length of longest filepath+line number,
3184 3186 filepath+linenumber,
3185 3187 function
3186 3188
3187 3189 Not to be used in production code but very convenient while developing.
3188 3190 """
3189 3191 entries = [
3190 3192 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3191 3193 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3192 3194 ][-depth:]
3193 3195 if entries:
3194 3196 fnmax = max(len(entry[0]) for entry in entries)
3195 3197 for fnln, func in entries:
3196 3198 if line is None:
3197 3199 yield (fnmax, fnln, func)
3198 3200 else:
3199 3201 yield line % (fnmax, fnln, func)
3200 3202
3201 3203
3202 3204 def debugstacktrace(
3203 3205 msg=b'stacktrace',
3204 3206 skip=0,
3205 3207 f=procutil.stderr,
3206 3208 otherf=procutil.stdout,
3207 3209 depth=0,
3208 3210 prefix=b'',
3209 3211 ):
3210 3212 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3211 3213 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3212 3214 By default it will flush stdout first.
3213 3215 It can be used everywhere and intentionally does not require an ui object.
3214 3216 Not to be used in production code but very convenient while developing.
3215 3217 """
3216 3218 if otherf:
3217 3219 otherf.flush()
3218 3220 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3219 3221 for line in getstackframes(skip + 1, depth=depth):
3220 3222 f.write(prefix + line)
3221 3223 f.flush()
3222 3224
3223 3225
3224 3226 # convenient shortcut
3225 3227 dst = debugstacktrace
3226 3228
3227 3229
3228 3230 def safename(f, tag, ctx, others=None):
3229 3231 """
3230 3232 Generate a name that it is safe to rename f to in the given context.
3231 3233
3232 3234 f: filename to rename
3233 3235 tag: a string tag that will be included in the new name
3234 3236 ctx: a context, in which the new name must not exist
3235 3237 others: a set of other filenames that the new name must not be in
3236 3238
3237 3239 Returns a file name of the form oldname~tag[~number] which does not exist
3238 3240 in the provided context and is not in the set of other names.
3239 3241 """
3240 3242 if others is None:
3241 3243 others = set()
3242 3244
3243 3245 fn = b'%s~%s' % (f, tag)
3244 3246 if fn not in ctx and fn not in others:
3245 3247 return fn
3246 3248 for n in itertools.count(1):
3247 3249 fn = b'%s~%s~%s' % (f, tag, n)
3248 3250 if fn not in ctx and fn not in others:
3249 3251 return fn
3250 3252
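# A minimal illustrative sketch (not part of util.py; _example_* is
# hypothetical): any object supporting `in`, such as a set, can stand in
# for the context.
def _example_safename():
    taken = {b'f~orig', b'f~orig~1'}
    assert safename(b'f', b'orig', taken) == b'f~orig~2'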
3251 3253
3252 3254 def readexactly(stream, n):
3253 3255 '''read n bytes from stream.read and abort if less was available'''
3254 3256 s = stream.read(n)
3255 3257 if len(s) < n:
3256 3258 raise error.Abort(
3257 3259 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3258 3260 % (len(s), n)
3259 3261 )
3260 3262 return s
3261 3263
3262 3264
3263 3265 def uvarintencode(value):
3264 3266 """Encode an unsigned integer value to a varint.
3265 3267
3266 3268 A varint is a variable length integer of 1 or more bytes. Each byte
3267 3269 except the last has the most significant bit set. The lower 7 bits of
3268 3270 each byte store the 2's complement representation, least significant group
3269 3271 first.
3270 3272
3271 3273 >>> uvarintencode(0)
3272 3274 '\\x00'
3273 3275 >>> uvarintencode(1)
3274 3276 '\\x01'
3275 3277 >>> uvarintencode(127)
3276 3278 '\\x7f'
3277 3279 >>> uvarintencode(1337)
3278 3280 '\\xb9\\n'
3279 3281 >>> uvarintencode(65536)
3280 3282 '\\x80\\x80\\x04'
3281 3283 >>> uvarintencode(-1)
3282 3284 Traceback (most recent call last):
3283 3285 ...
3284 3286 ProgrammingError: negative value for uvarint: -1
3285 3287 """
3286 3288 if value < 0:
3287 3289 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3288 3290 bits = value & 0x7F
3289 3291 value >>= 7
3290 3292 bytes = []
3291 3293 while value:
3292 3294 bytes.append(pycompat.bytechr(0x80 | bits))
3293 3295 bits = value & 0x7F
3294 3296 value >>= 7
3295 3297 bytes.append(pycompat.bytechr(bits))
3296 3298
3297 3299 return b''.join(bytes)
3298 3300
3299 3301
3300 3302 def uvarintdecodestream(fh):
3301 3303 """Decode an unsigned variable length integer from a stream.
3302 3304
3303 3305 The passed argument is anything that has a ``.read(N)`` method.
3304 3306
3305 3307 >>> from io import BytesIO
3306 3308 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3307 3309 0
3308 3310 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3309 3311 1
3310 3312 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3311 3313 127
3312 3314 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3313 3315 1337
3314 3316 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3315 3317 65536
3316 3318 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3317 3319 Traceback (most recent call last):
3318 3320 ...
3319 3321 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3320 3322 """
3321 3323 result = 0
3322 3324 shift = 0
3323 3325 while True:
3324 3326 byte = ord(readexactly(fh, 1))
3325 3327 result |= (byte & 0x7F) << shift
3326 3328 if not (byte & 0x80):
3327 3329 return result
3328 3330 shift += 7
3329 3331
3330 3332
3331 3333 # Passing the '' locale means that the locale should be set according to the
3332 3334 # user settings (environment variables).
3333 3335 # Python sometimes avoids setting the global locale settings. When interfacing
3334 3336 # with C code (e.g. the curses module or the Subversion bindings), the global
3335 3337 # locale settings must be initialized correctly. Python 2 does not initialize
3336 3338 # the global locale settings on interpreter startup. Python 3 sometimes
3337 3339 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3338 3340 # explicitly initialize it to get consistent behavior if it's not already
3339 3341 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3340 3342 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3341 3343 # if we can remove this code.
3342 3344 @contextlib.contextmanager
3343 3345 def with_lc_ctype():
3344 3346 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3345 3347 if oldloc == 'C':
3346 3348 try:
3347 3349 try:
3348 3350 locale.setlocale(locale.LC_CTYPE, '')
3349 3351 except locale.Error:
3350 3352 # The likely case is that the locale from the environment
3351 3353 # variables is unknown.
3352 3354 pass
3353 3355 yield
3354 3356 finally:
3355 3357 locale.setlocale(locale.LC_CTYPE, oldloc)
3356 3358 else:
3357 3359 yield
3358 3360
3359 3361
3360 3362 def _estimatememory() -> Optional[int]:
3361 3363 """Provide an estimate for the available system memory in Bytes.
3362 3364
3363 3365 If no estimate can be provided on the platform, returns None.
3364 3366 """
3365 3367 if pycompat.sysplatform.startswith(b'win'):
3366 3368 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3367 3369 # noinspection PyPep8Naming
3368 3370 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3369 3371 from ctypes.wintypes import ( # pytype: disable=import-error
3370 3372 Structure,
3371 3373 byref,
3372 3374 sizeof,
3373 3375 windll,
3374 3376 )
3375 3377
3376 3378 class MEMORYSTATUSEX(Structure):
3377 3379 _fields_ = [
3378 3380 ('dwLength', DWORD),
3379 3381 ('dwMemoryLoad', DWORD),
3380 3382 ('ullTotalPhys', DWORDLONG),
3381 3383 ('ullAvailPhys', DWORDLONG),
3382 3384 ('ullTotalPageFile', DWORDLONG),
3383 3385 ('ullAvailPageFile', DWORDLONG),
3384 3386 ('ullTotalVirtual', DWORDLONG),
3385 3387 ('ullAvailVirtual', DWORDLONG),
3386 3388 ('ullExtendedVirtual', DWORDLONG),
3387 3389 ]
3388 3390
3389 3391 x = MEMORYSTATUSEX()
3390 3392 x.dwLength = sizeof(x)
3391 3393 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3392 3394 return x.ullAvailPhys
3393 3395
3394 3396 # On newer Unix-like systems and Mac OSX, the sysconf interface
3395 3397 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3396 3398 # seems to be implemented on most systems.
3397 3399 try:
3398 3400 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3399 3401 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3400 3402 return pagesize * pages
3401 3403 except OSError: # sysconf can fail
3402 3404 pass
3403 3405 except KeyError: # unknown parameter
3404 3406 pass