##// END OF EJS Templates
util: remove dead code which used to be for old python2 versions...
Alex Gaynor -
r33549:9a2ee959 default
parent child Browse files
Show More
@@ -1,3702 +1,3696
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import codecs
21 21 import collections
22 22 import contextlib
23 23 import datetime
24 24 import errno
25 25 import gc
26 26 import hashlib
27 27 import imp
28 28 import os
29 29 import platform as pyplatform
30 30 import re as remod
31 31 import shutil
32 32 import signal
33 33 import socket
34 34 import stat
35 35 import string
36 36 import subprocess
37 37 import sys
38 38 import tempfile
39 39 import textwrap
40 40 import time
41 41 import traceback
42 42 import warnings
43 43 import zlib
44 44
45 45 from . import (
46 46 encoding,
47 47 error,
48 48 i18n,
49 49 policy,
50 50 pycompat,
51 51 )
52 52
53 53 base85 = policy.importmod(r'base85')
54 54 osutil = policy.importmod(r'osutil')
55 55 parsers = policy.importmod(r'parsers')
56 56
57 57 b85decode = base85.b85decode
58 58 b85encode = base85.b85encode
59 59
60 60 cookielib = pycompat.cookielib
61 61 empty = pycompat.empty
62 62 httplib = pycompat.httplib
63 63 httpserver = pycompat.httpserver
64 64 pickle = pycompat.pickle
65 65 queue = pycompat.queue
66 66 socketserver = pycompat.socketserver
67 67 stderr = pycompat.stderr
68 68 stdin = pycompat.stdin
69 69 stdout = pycompat.stdout
70 70 stringio = pycompat.stringio
71 71 urlerr = pycompat.urlerr
72 72 urlreq = pycompat.urlreq
73 73 xmlrpclib = pycompat.xmlrpclib
74 74
75 75 # workaround for win32mbcs
76 76 _filenamebytestr = pycompat.bytestr
77 77
def isatty(fp):
    """Return fp.isatty(), treating objects without isatty() as non-ttys.

    The try/except (rather than hasattr) also handles file-like proxies
    whose isatty attribute lookup itself raises AttributeError.
    """
    try:
        return fp.isatty()
    except AttributeError:
        return False
83 83
84 84 # glibc determines buffering on first write to stdout - if we replace a TTY
85 85 # destined stdout with a pipe destined stdout (e.g. pager), we want line
86 86 # buffering
87 87 if isatty(stdout):
88 88 stdout = os.fdopen(stdout.fileno(), pycompat.sysstr('wb'), 1)
89 89
90 90 if pycompat.osname == 'nt':
91 91 from . import windows as platform
92 92 stdout = platform.winstdout(stdout)
93 93 else:
94 94 from . import posix as platform
95 95
96 96 _ = i18n._
97 97
98 98 bindunixsocket = platform.bindunixsocket
99 99 cachestat = platform.cachestat
100 100 checkexec = platform.checkexec
101 101 checklink = platform.checklink
102 102 copymode = platform.copymode
103 103 executablepath = platform.executablepath
104 104 expandglobs = platform.expandglobs
105 105 explainexit = platform.explainexit
106 106 findexe = platform.findexe
107 107 gethgcmd = platform.gethgcmd
108 108 getuser = platform.getuser
109 109 getpid = os.getpid
110 110 groupmembers = platform.groupmembers
111 111 groupname = platform.groupname
112 112 hidewindow = platform.hidewindow
113 113 isexec = platform.isexec
114 114 isowner = platform.isowner
115 115 listdir = osutil.listdir
116 116 localpath = platform.localpath
117 117 lookupreg = platform.lookupreg
118 118 makedir = platform.makedir
119 119 nlinks = platform.nlinks
120 120 normpath = platform.normpath
121 121 normcase = platform.normcase
122 122 normcasespec = platform.normcasespec
123 123 normcasefallback = platform.normcasefallback
124 124 openhardlinks = platform.openhardlinks
125 125 oslink = platform.oslink
126 126 parsepatchoutput = platform.parsepatchoutput
127 127 pconvert = platform.pconvert
128 128 poll = platform.poll
129 129 popen = platform.popen
130 130 posixfile = platform.posixfile
131 131 quotecommand = platform.quotecommand
132 132 readpipe = platform.readpipe
133 133 rename = platform.rename
134 134 removedirs = platform.removedirs
135 135 samedevice = platform.samedevice
136 136 samefile = platform.samefile
137 137 samestat = platform.samestat
138 138 setbinary = platform.setbinary
139 139 setflags = platform.setflags
140 140 setsignalhandler = platform.setsignalhandler
141 141 shellquote = platform.shellquote
142 142 spawndetached = platform.spawndetached
143 143 split = platform.split
144 144 sshargs = platform.sshargs
145 145 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
146 146 statisexec = platform.statisexec
147 147 statislink = platform.statislink
148 148 testpid = platform.testpid
149 149 umask = platform.umask
150 150 unlink = platform.unlink
151 151 username = platform.username
152 152
# recvfds and setprocname exist only in some osutil implementations;
# expose them at module level when the active policy provides them.
try:
    recvfds = osutil.recvfds
except AttributeError:
    pass
try:
    setprocname = osutil.setprocname
except AttributeError:
    pass

# Python compatibility

# sentinel: distinguishes "argument not supplied" from an explicit None
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
170 170
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    getattr() with a default only masks AttributeError, so unlike
    Python 2's built-in hasattr this does not swallow other exceptions
    raised by a property during lookup.
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
173 173
def bitsfrom(container):
    """OR together every element of `container` into a single bitmask."""
    mask = 0
    for flag in container:
        mask |= flag
    return mask
179 179
180 180 # python 2.6 still have deprecation warning enabled by default. We do not want
181 181 # to display anything to standard user so detect if we are running test and
182 182 # only use python deprecation warning in this case.
183 183 _dowarn = bool(encoding.environ.get('HGEMITWARNINGS'))
184 184 if _dowarn:
185 185 # explicitly unfilter our warning for python 2.7
186 186 #
187 187 # The option of setting PYTHONWARNINGS in the test runner was investigated.
188 188 # However, module name set through PYTHONWARNINGS was exactly matched, so
189 189 # we cannot set 'mercurial' and have it match eg: 'mercurial.scmutil'. This
190 190 # makes the whole PYTHONWARNINGS thing useless for our usecase.
191 191 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'mercurial')
192 192 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext')
193 193 warnings.filterwarnings(r'default', r'', DeprecationWarning, r'hgext3rd')
194 194
def nouideprecwarn(msg, version, stacklevel=1):
    """Issue an python native deprecation warning

    This is a noop outside of tests, use 'ui.deprecwarn' when possible.

    ``version`` is the Mercurial release after which compatibility will
    be dropped; it is interpolated into the warning text.
    """
    if _dowarn:
        msg += ("\n(compatibility will be dropped after Mercurial-%s,"
                " update your code.)") % version
        # +1 so the warning is attributed to our caller, not this helper
        warnings.warn(msg, DeprecationWarning, stacklevel + 1)
204 204
# mapping of supported digest names to their hashlib constructors
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every preference entry must name a supported digest
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed data to every underlying hash object"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest for `key` computed so far"""
        if key not in DIGESTS:
            # bugfix: use 'key', not 'k' (which leaked from the module-level
            # sanity-check loop), so the error names the digest requested
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
262 262
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the digester as we go"""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """abort unless the bytes read match the expected size and digests"""
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
294 294
# Provide a `buffer` function on Python 3, where the builtin is gone.
# (The diff residue that kept the removed Python 2 slicing fallback
# alongside the memoryview version is dropped; only the memoryview
# implementation remains.)
try:
    buffer = buffer
except NameError:
    def buffer(sliceable, offset=0, length=None):
        """Return a zero-copy view of `sliceable[offset:offset + length]`."""
        if length is not None:
            return memoryview(sliceable)[offset:offset + length]
        return memoryview(sliceable)[offset:]
308 302
309 303 closefds = pycompat.osname == 'posix'
310 304
311 305 _chunksize = 4096
312 306
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        self._buffer = []    # chunks read from the pipe but not yet consumed
        self._eof = False    # set once os.read() returns no data
        self._lenbuf = 0     # total byte count currently held in self._buffer

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the raw descriptor so callers can select/poll on it
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Read up to `size` bytes, filling from the pipe until satisfied
        or EOF is hit."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Read one line (through the newline), or everything left at EOF."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1    # index of the newline within the last buffered chunk
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so a single slice suffices
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # unbuffered os.read so select/poll semantics stay truthful
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
406 400
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through a shell and return its (stdin, stdout) pipes.

    Setting bufsize to -1 lets the system decide the buffer size.
    The default for bufsize is 0, meaning unbuffered. This leads to
    poor performance on Mac OS X: http://bugs.python.org/issue4194
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
417 411
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but without exposing the Popen object itself."""
    stdin, stdout, stderr, _proc = popen4(cmd, env, newlines)
    return stdin, stdout, stderr
421 415
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through a shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
430 424
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
438 432
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split the numeric part from the optional '+build' / '-rc' suffix
    pieces = remod.split('[\+-]', v, 1)
    if len(pieces) == 1:
        numeric, extra = pieces[0], None
    else:
        numeric, extra = pieces

    vints = []
    for part in numeric.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad with None so (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
507 501
508 502 # used by parsedate
509 503 defaultdateformats = (
510 504 '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
511 505 '%Y-%m-%dT%H:%M', # without seconds
512 506 '%Y-%m-%dT%H%M%S', # another awful but legal variant without :
513 507 '%Y-%m-%dT%H%M', # without seconds
514 508 '%Y-%m-%d %H:%M:%S', # our common legal variant
515 509 '%Y-%m-%d %H:%M', # without seconds
516 510 '%Y-%m-%d %H%M%S', # without :
517 511 '%Y-%m-%d %H%M', # without seconds
518 512 '%Y-%m-%d %I:%M:%S%p',
519 513 '%Y-%m-%d %H:%M',
520 514 '%Y-%m-%d %I:%M%p',
521 515 '%Y-%m-%d',
522 516 '%m-%d',
523 517 '%m/%d',
524 518 '%m/%d/%y',
525 519 '%m/%d/%Y',
526 520 '%a %b %d %H:%M:%S %Y',
527 521 '%a %b %d %I:%M:%S%p %Y',
528 522 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
529 523 '%b %d %H:%M:%S %Y',
530 524 '%b %d %I:%M:%S%p %Y',
531 525 '%b %d %H:%M:%S',
532 526 '%b %d %I:%M:%S%p',
533 527 '%b %d %H:%M',
534 528 '%b %d %I:%M%p',
535 529 '%b %d %Y',
536 530 '%b %d',
537 531 '%H:%M:%S',
538 532 '%I:%M:%S%p',
539 533 '%H:%M',
540 534 '%I:%M%p',
541 535 )
542 536
543 537 extendeddateformats = defaultdateformats + (
544 538 "%Y",
545 539 "%Y-%m",
546 540 "%b",
547 541 "%b %Y",
548 542 )
549 543
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-arg functions have a single possible result; keep it in a
        # one-item list
        memo = []
        def f():
            if not memo:
                memo.append(func())
            return memo[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
575 569
class sortdict(collections.OrderedDict):
    '''an ordered dictionary keyed in last-set order

    >>> d1 = sortdict([('a', 0), ('b', 1)])
    >>> d2 = d1.copy()
    >>> d2
    sortdict([('a', 0), ('b', 1)])
    >>> d2.update([('a', 2)])
    >>> d2.keys() # should still be in last-set order
    ['b', 'a']
    '''

    def __setitem__(self, key, value):
        # remove any existing entry first so the key is re-appended at the
        # end, giving last-set ordering instead of first-insertion ordering
        if key in self:
            del self[key]
        super(sortdict, self).__setitem__(key, value)
592 586
@contextlib.contextmanager
def acceptintervention(tr=None):
    """A context manager that closes the transaction on InterventionRequired

    If no transaction was provided, this simply runs the body and returns
    """
    if not tr:
        yield
        return
    try:
        yield
        tr.close()
    except error.InterventionRequired:
        # the interruption is expected: commit what we have so far, then
        # re-raise so the caller can surface the request to the user
        tr.close()
        raise
    finally:
        # release the transaction on every path (success, intervention,
        # or any other exception)
        tr.release()
610 604
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps the (potentially many) cache nodes small by
    # dropping the per-instance __dict__
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is _notset for a node that currently holds no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
629 623
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # backing dict: key -> _lrucachenode
        self._cache = {}

        # start with a single self-linked node; more are added lazily up
        # to self._capacity
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        # an access freshens the entry (moves it to the head)
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

            # At capacity. Kill the old entry.
            if node.key is not _notset:
                del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Like dict.get; note this does NOT freshen the entry."""
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        """Empty the cache, keeping the allocated node ring for reuse."""
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        """Return a new lrucachedict with the same capacity and entries."""
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
788 782
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in results:
                # freshen: move the key to the most-recent end
                order.remove(arg)
            else:
                if len(results) > 20:
                    # evict the least recently used entry
                    del results[order.popleft()]
                results[arg] = func(arg)
            order.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                order.remove(args)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[args] = func(*args)
            order.append(args)
            return results[args]

    return f
815 809
class propertycache(object):
    """Descriptor that caches a computed attribute on the instance.

    The wrapped function runs once per object; its result is stored in
    the instance __dict__ under the same name, shadowing this descriptor
    for all later accesses.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
828 822
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
835 829
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temp file for the command to consume
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, pycompat.sysstr('wb'))
        fp.write(s)
        fp.close()
        # create (and immediately close) the output temp file; the
        # command itself will write into it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # OpenVMS reports success via the low bit rather than status zero
        if pycompat.sysplatform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files on every path; ignore
        # failures (e.g. the file was never created)
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
869 863
# dispatch table mapping a command prefix to the filter implementation
# handling it; consulted by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
874 868
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, filterfn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the prefix (and surrounding whitespace) before
            # handing the command to the specialized filter
            return filterfn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
881 875
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
885 879
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # double the threshold, but never below the largest power of
            # two not exceeding what we are about to emit, capped at max
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
916 910
917 911 Abort = error.Abort
918 912
def always(fn):
    """matcher predicate that accepts every input"""
    return True
921 915
def never(fn):
    """matcher predicate that rejects every input"""
    return False
924 918
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # nothing to work around on modern interpreters
        return func
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if it was on when we started
            if wasenabled:
                gc.enable()
    return wrapper
948 942
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # on different drives there is no relative path; anchor n2 at root
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common prefix of the two paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return pycompat.ossep.join((['..'] * len(a)) + b) or '.'
974 968
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    return (safehasattr(sys, "frozen") or # new py2exe
            safehasattr(sys, "importers") or # old py2exe
            imp.is_frozen(u"__main__")) # tools/freeze
984 978
985 979 # the location of data files matching the source code
986 980 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
987 981 # executable version (py2exe) doesn't support __file__
988 982 datapath = os.path.dirname(pycompat.sysexecutable)
989 983 else:
990 984 datapath = os.path.dirname(pycompat.fsencode(__file__))
991 985
992 986 i18n.setdatapath(datapath)
993 987
994 988 _hgexecutable = None
995 989
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and cached in the module-level
    _hgexecutable.
    """
    if _hgexecutable is None:
        hg = encoding.environ.get('HG')
        mainmod = sys.modules[pycompat.sysstr('__main__')]
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(encoding.environ['EXECUTABLEPATH'])
            else:
                # frozen binaries: the interpreter *is* the hg executable
                _sethgexecutable(pycompat.sysexecutable)
        elif (os.path.basename(
            pycompat.fsencode(getattr(mainmod, '__file__', ''))) == 'hg'):
            # the running script is itself named 'hg'
            _sethgexecutable(pycompat.fsencode(mainmod.__file__))
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
1019 1013
1020 1014 def _sethgexecutable(path):
1021 1015 """set location of the 'hg' executable"""
1022 1016 global _hgexecutable
1023 1017 _hgexecutable = path
1024 1018
1025 1019 def _isstdout(f):
1026 1020 fileno = getattr(f, 'fileno', None)
1027 1021 return fileno and fileno() == sys.__stdout__.fileno()
1028 1022
def shellenviron(environ=None):
    """return environ with optional override, useful for shelling out"""
    def tostr(val):
        # render python values the way shell scripts expect them:
        # booleans/None become '1'/'0', everything else is str()ed
        if val is True:
            return '1'
        if val is None or val is False:
            return '0'
        return str(val)
    env = dict(encoding.environ)
    if environ:
        for k, v in environ.iteritems():
            env[k] = tostr(v)
    env['HG'] = hgexecutable()
    return env
1043 1037
def system(cmd, environ=None, cwd=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    # flush our own stdout first so the child's output doesn't appear
    # before text we already printed
    try:
        stdout.flush()
    except Exception:
        pass
    cmd = quotecommand(cmd)
    env = shellenviron(environ)
    if out is None or _isstdout(out):
        # no redirection requested (or out *is* stdout): let the child
        # inherit our streams directly
        rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                             env=env, cwd=cwd)
    else:
        # capture both stdout and stderr and copy them to ``out``
        proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                env=env, cwd=cwd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        for line in iter(proc.stdout.readline, ''):
            out.write(line)
        proc.wait()
        rc = proc.returncode
    # on OpenVMS an odd return code means success
    if pycompat.sysplatform == 'OpenVMS' and rc & 1:
        rc = 0
    return rc
1070 1064
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth 1 means the call itself failed to
            # bind (bad signature) rather than func raising internally
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) != 1:
                raise
            raise error.SignatureError

    return check
1082 1076
# a whitelist of known filesystems where hardlink works reliably
# (hardlinking is skipped elsewhere, e.g. on CIFS, see issue4546)
_hardlinkfswhitelist = {
    'btrfs',
    'ext2',
    'ext3',
    'ext4',
    'hfs',
    'jfs',
    'reiserfs',
    'tmpfs',
    'ufs',
    'xfs',
    'zfs',
}
1097 1091
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        # remember the old stat so we can detect timestamp ambiguity below
        if checkambig:
            oldstat = checkambig and filestat.frompath(dest)
        unlink(dest)
    if hardlink:
        # Hardlinks are problematic on CIFS (issue4546), do not allow hardlinks
        # unless we are confident that dest is on a whitelisted filesystem.
        try:
            fstype = getfstype(os.path.dirname(dest))
        except OSError:
            fstype = None
        if fstype not in _hardlinkfswhitelist:
            hardlink = False
    if hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the link rather than copying its target's contents
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
                if oldstat and oldstat.stat:
                    newstat = filestat.frompath(dest)
                    if newstat.isambig(oldstat):
                        # stat of copied file is ambiguous to original one
                        advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                        os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1149 1143
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) pair: whether hardlinking was (still)
    used, and how many files were copied/linked. ``progress`` is called
    with a topic and a position (None meaning "done").
    """
    num = 0

    gettopic = lambda: hardlink and _('linking') or _('copying')

    if os.path.isdir(src):
        if hardlink is None:
            # hardlinking only works within one filesystem
            hardlink = (os.stat(src).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()
        os.mkdir(dst)
        for name, kind in listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # offset the child's progress positions by what we already did
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink is None:
            hardlink = (os.stat(os.path.dirname(src)).st_dev ==
                        os.stat(os.path.dirname(dst)).st_dev)
        topic = gettopic()

        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a hardlink fails, stop trying for the whole tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1189 1183
# names that Windows reserves for devices in every directory,
# regardless of extension (compared case-insensitively below)
_winreservednames = b'''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may not appear in Windows filenames
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in _filenamebytestr(n):
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            # control characters (0-31) are forbidden in Windows names
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved whatever the extension (e.g. con.xml)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE: both checks below are substring tests on 2-char literals:
        # ``t in '. '`` means t is '.' or ' '; ``n not in '..'`` is True
        # unless n is '.' or '..', deliberately exempting those entries
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1240 1234
# pick the platform's filename validity checker and best wall-clock timer
if pycompat.osname == 'nt':
    checkosfilename = checkwinfilename
    timer = time.clock
else:
    checkosfilename = platform.checkosfilename
    timer = time.time

# prefer the monotonic high-resolution counter when available (py >= 3.3)
if safehasattr(time, "perf_counter"):
    timer = time.perf_counter
1250 1244
def makelock(info, pathname):
    """Create a lock at *pathname* whose payload is *info*.

    Prefers a symlink (atomic, readable via readlock); platforms
    without symlinks fall back to an exclusively-created regular file.
    An already-existing lock raises OSError(EEXIST).
    """
    try:
        return os.symlink(info, pathname)
    except OSError as err:
        # an existing lock is a real failure; anything else means we
        # should try the regular-file fallback below
        if err.errno == errno.EEXIST:
            raise
    except AttributeError:  # platform without os.symlink
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1263 1257
def readlock(pathname):
    """Return the info stored in the lock at *pathname*.

    Reads the symlink target when the lock is a symlink; otherwise
    falls back to reading a regular file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as err:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported here
        if err.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError:  # platform without os.readlink
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1276 1270
def fstat(fp):
    """stat() a file object, falling back to stat'ing its .name when the
    object has no fileno() method."""
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1283 1277
1284 1278 # File system features
1285 1279
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    original = os.lstat(path)
    dirname, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no case to fold: no evidence against sensitivity
        return True
    try:
        other = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded variant doesn't exist: filesystem distinguishes case
        return True
    # same inode under both spellings means case-insensitive
    return other != original
1308 1302
try:
    import re2
    # None means "re2 importable but not yet validated"; _re._checkre2()
    # decides later whether it actually works (see issue3964)
    _re2 = None
except ImportError:
    _re2 = False
1314 1308
class _re(object):
    """Facade over the stdlib 're' module that transparently prefers
    google-re2 when it is installed and verified to work."""
    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses features re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton through which regex compilation should go
re = _re()
1359 1353
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> actual on-disk name for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = pycompat.ossep
    if pycompat.osaltsep:
        seps = seps + pycompat.osaltsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the previous code discarded the
    # result, leaving '\' unescaped inside the character classes below
    # on Windows (where ossep is '\\').
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(br'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1402 1396
def getfstype(dirpath):
    '''Get the filesystem type name from a directory (best-effort)

    Returns None if we are unsure. Raises OSError on ENOENT, EPERM, etc.
    '''
    impl = getattr(osutil, 'getfstype', None)
    if impl is None:
        # no platform support compiled in
        return None
    return impl(dirpath)
1409 1403
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # leftover probe file: don't clobber it, just report failure
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the probe files, best-effort
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1445 1439
def endswithsep(path):
    """Does *path* end with a directory separator (os.sep or os.altsep)?"""
    if path.endswith(pycompat.ossep):
        return True
    altsep = pycompat.osaltsep
    # note: preserved as-is so a falsy altsep passes through unchanged
    return altsep and path.endswith(altsep)
1450 1444
def splitpath(path):
    """Split *path* on os.sep (and os.sep only).

    os.altsep is deliberately ignored: this is a readable spelling of
    ``path.split(os.sep)``. Run os.path.normpath() first if the path
    may need normalizing.
    """
    sep = pycompat.ossep
    return path.split(sep)
1458 1452
def gui():
    '''Are we running in a GUI?'''
    if pycompat.sysplatform != 'darwin':
        # Windows always has a GUI; elsewhere require an X display.
        return pycompat.osname == "nt" or encoding.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in encoding.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1473 1467
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    # create the temp file in the same directory so a later rename is
    # atomic (same filesystem)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source simply leaves the temp file empty
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1512 1506
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, stat):
        # stat is an os.stat_result, or None for a nonexistent file
        self.stat = stat

    @classmethod
    def frompath(cls, path):
        """Build a filestat from a path; a missing file yields stat=None."""
        try:
            stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            stat = None
        return cls(stat)

    @classmethod
    def fromfp(cls, fp):
        """Build a filestat from an open file object."""
        stat = os.fstat(fp.fileno())
        return cls(stat)

    # keep identity-based hashing even though __eq__ is value-based
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparison of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            pass
        try:
            # two nonexistent files compare equal
            return self.stat is None and old.stat is None
        except AttributeError:
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            # either side has stat=None: nothing to be ambiguous about
            return False

    def avoidambig(self, path, old):
        """Change file stat of specified path to avoid ambiguity

        'old' should be previous filestat of 'path'.

        This skips avoiding ambiguity, if a process doesn't have
        appropriate privileges for 'path'. This returns False in this
        case.

        Otherwise, this returns True, as "ambiguity is avoided".
        """
        # bump mtime by one second, wrapping to stay in signed 32 bits
        advanced = (old.stat.st_mtime + 1) & 0x7fffffff
        try:
            os.utime(path, (advanced, advanced))
        except OSError as inst:
            if inst.errno == errno.EPERM:
                # utime() on the file created by another user causes EPERM,
                # if a process doesn't have appropriate privileges
                return False
            raise
        return True

    def __ne__(self, other):
        # needed under py2: __ne__ is not derived from __eq__
        return not self == other
1614 1608
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """Commit: rename the temp copy over the permanent name."""
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat.frompath(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat.frompath(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        """Abort: drop the temp copy, leaving the original untouched."""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception escaped the block
        if exctype is not None:
            self.discard()
        else:
            self.close()
1677 1671
def unlinkpath(f, ignoremissing=False):
    """Remove file f, then prune any parent directories left empty.

    With ignoremissing=True a nonexistent f is not an error.
    """
    remove = tryunlink if ignoremissing else unlink
    remove(f)
    try:
        # best-effort removal of now-empty parent directories
        removedirs(os.path.dirname(f))
    except OSError:
        pass
1689 1683
def tryunlink(f):
    """Remove a file, silently ignoring a missing one (ENOENT)."""
    try:
        unlink(f)
    except OSError as err:
        if err.errno == errno.ENOENT:
            return
        raise
1697 1691
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        # ENOENT means a missing parent: create ancestors recursively
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        # applied only to this leaf; ancestors keep their inherited mode
        os.chmod(name, mode)
1725 1719
def readfile(path):
    """Return the entire contents of the file at *path* as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1729 1723
def writefile(path, text):
    """Write *text* (bytes) to *path*, replacing any existing content."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1733 1727
def appendfile(path, text):
    """Append *text* (bytes) to *path*, creating the file if missing."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1737 1731
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # cap chunk size at 256k so partial reads below never copy
            # more than that at once
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # how many bytes of queue[0] have already been consumed
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1817 1811
def filechunkiter(f, size=131072, limit=None):
    """Create a generator that produces the data in the file size
    (default 131072) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        want = size if limit is None else min(limit, size)
        # a zero-byte request short-circuits without touching the file
        chunk = want and f.read(want)
        if not chunk:
            return
        if limit:
            limit -= len(chunk)
        yield chunk
1838 1832
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    when = time.time() if timestamp is None else timestamp
    if when < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % when, hint=hint)
    # local tz offset = UTC wall clock minus local wall clock, in seconds
    utc = datetime.datetime.utcfromtimestamp(when)
    local = datetime.datetime.fromtimestamp(when)
    delta = utc - local
    return when, delta.days * 86400 + delta.seconds
1851 1845
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the hg-specific %1/%2 (and %z) escapes into a literal
        # [+-]HHMM zone string before handing off to strftime
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    d = t - tz
    # clamp to the signed 32-bit range accepted below
    if d > 0x7fffffff:
        d = 0x7fffffff
    elif d < -0x80000000:
        d = -0x80000000
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    t = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    s = encoding.strtolocal(t.strftime(encoding.strfromlocal(format)))
    return s
1887 1881
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    return datestr(date, format='%Y-%m-%d')
1891 1885
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair"""

    # named UTC zones
    if s.endswith(("GMT", "UTC")):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        # offsets are stored as seconds *west* of UTC, hence the flip
        west = -1 if s[-5] == "+" else 1
        hh = int(s[-4:-2])
        mm = int(s[-2:])
        return west * (hh * 60 + mm) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        west = -1 if s[-6] == "+" else 1
        hh = int(s[-5:-3])
        mm = int(s[-2:])
        return west * (hh * 60 + mm) * 60, s[:-6]

    # no recognizable zone suffix
    return None, s
1919 1913
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # note: the 'string' parameter shadows the stdlib 'string' module
    # imported at the top of this file; that module is not needed here
    if defaults is None:
        defaults = {}

    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        part = pycompat.bytestr(part)
        # each 'part' groups strptime codes carrying the same information,
        # e.g. "yY" covers both %y and %Y
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # defaults[part] is a (biased, now) pair built by parsedate()
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(encoding.strfromlocal(date),
                              encoding.strfromlocal(format))
    # timegm() treats the tuple as UTC, giving the zone-local epoch value
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1952 1946
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, honoring translated spellings as well
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # first, try the internal "unixtime offset" form (two integers)
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0:1] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0:1])

            # strdate() picks element [0] (biased) or [1] (now) per field
            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise error.ParseError(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise error.ParseError(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise error.ParseError(_('impossible time zone offset: %d') % offset)
    return when, offset
2029 2023
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp compatible with the given accuracy: bias
        # unknown month/day fields to their minimum
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp compatible with the given accuracy: bias
        # unknown fields to their maximum, retrying with shorter month
        # lengths when the day does not exist (e.g. Feb 31)
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except (Abort, error.ParseError):
                # parsedate signals an unparsable/impossible date with
                # error.ParseError (it never raises Abort itself); catching
                # only Abort here made this fallback loop dead code
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before the given date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after the given date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a single date matches the whole span it covers at its accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
2105 2099
def stringmatcher(pattern, casesensitive=True):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])
    >>> def itest(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern, casesensitive=False)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])

    case insensitive regex matches
    >>> itest('re:A.+b', 'nomatch', 'fooadef', 'fooadefBar')
    ('re', 'A.+b', [False, False, True])

    case insensitive literal matches
    >>> itest('ABCDEFG', 'abc', 'def', 'abcdefg')
    ('literal', 'ABCDEFG', [False, False, True])
    """
    if pattern.startswith('re:'):
        pattern = pattern[3:]
        flags = 0 if casesensitive else remod.I
        try:
            regex = remod.compile(pattern, flags)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', pattern, regex.search

    # anything else is a literal; strip an explicit 'literal:' prefix
    if pattern.startswith('literal:'):
        pattern = pattern[8:]

    if casesensitive:
        match = pattern.__eq__
    else:
        ipat = encoding.lower(pattern)
        match = lambda s: ipat == encoding.lower(s)
    return 'literal', pattern, match
2164 2158
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then any display name preceding a '<',
    # then truncate at the first space or dot
    user = user.partition('@')[0]
    user = user.split('<', 1)[-1]
    user = user.partition(' ')[0]
    return user.partition('.')[0]
2180 2174
def emailuser(user):
    """Return the user portion of an email address."""
    # drop the mail domain, then any display name preceding a '<'
    user = user.partition('@')[0]
    return user.split('<', 1)[-1]
2190 2184
def email(author):
    '''get email of author.'''
    # the address is whatever sits between '<' and '>'; with no
    # brackets the whole string is taken as the address
    start = author.find('<') + 1
    stop = author.find('>')
    if stop == -1:
        return author[start:]
    return author[start:stop]
2197 2191
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim, which is display-column aware
    return encoding.trim(text, maxlength, ellipsis='...')
2201 2195
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''
    # unittable rows are (multiplier, divisor, format) tuples, ordered
    # from the largest unit down to the smallest

    def go(count):
        for multiplier, divisor, fmt in unittable:
            # a row applies once the count reaches multiplier units
            if abs(count) >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # nothing matched: fall back to the smallest unit's format
        smallestfmt = unittable[-1][2]
        return smallestfmt % count

    return go
2212 2206
def processlinerange(fromline, toline):
    """Check that linerange <fromline>:<toline> makes sense and return a
    0-based range.

    >>> processlinerange(10, 20)
    (9, 20)
    >>> processlinerange(2, 1)
    Traceback (most recent call last):
        ...
    ParseError: line range must be positive
    >>> processlinerange(0, 5)
    Traceback (most recent call last):
        ...
    ParseError: fromline must be strictly positive
    """
    if fromline > toline:
        raise error.ParseError(_("line range must be positive"))
    if fromline < 1:
        raise error.ParseError(_("fromline must be strictly positive"))
    # turn the 1-based inclusive bounds into a 0-based half-open range
    return fromline - 1, toline
2233 2227
# render a byte count using the largest unit that yields a value >= 1
# in that unit, with precision decreasing as the value grows
# (e.g. 1234567 -> '1.18 MB'); see unitcountfn above
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2246 2240
# Matches a single EOL which can either be a CRLF where repeated CR
# are removed or a LF. We do not care about old Macintosh files, so a
# stray CR is an error.
_eolre = remod.compile(br'\r*\n')

def tolf(s):
    # normalize all line endings in s to LF
    return _eolre.sub('\n', s)

def tocrlf(s):
    # normalize all line endings in s to CRLF
    return _eolre.sub('\r\n', s)

# bind the converters matching this platform's native line ending; on
# LF platforms both directions are the identity
if pycompat.oslinesep == '\r\n':
    tonativeeol = tocrlf
    fromnativeeol = tolf
else:
    tonativeeol = pycompat.identity
    fromnativeeol = pycompat.identity
2264 2258
def escapestr(s):
    # codecs.escape_encode is the C function underlying the Python 2
    # s.encode('string_escape'); calling it directly also works on
    # Python 3
    encoded, _length = codecs.escape_encode(s)
    return encoded
2269 2263
def unescapestr(s):
    # inverse of escapestr: interpret backslash escape sequences in s
    decoded, _length = codecs.escape_decode(s)
    return decoded
2272 2266
def uirepr(s):
    # Avoid double backslash in Windows path repr()
    quoted = repr(s)
    return quoted.replace('\\\\', '\\')
2276 2270
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # self-replacing factory: the first call defines the wrapper class,
    # rebinds the module-level name MBTextWrapper to that class (via the
    # 'global' statement at the bottom), and returns an instance; later
    # "calls" construct the class directly
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first piece occupies at most space_left
            # display columns (may be empty if the first char is too wide)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == r'' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == r''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + r''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2380 2374
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte-string line to at most width display columns.

    initindent prefixes the first output line, hangindent all following
    ones.  Input and output are in the local encoding; the wrapping
    itself is done on unicode so widths are counted per display column
    (see MBTextWrapper).
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # decode to unicode for width-aware wrapping
    line = line.decode(pycompat.sysstr(encoding.encoding),
                       pycompat.sysstr(encoding.encodingmode))
    initindent = initindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    hangindent = hangindent.decode(pycompat.sysstr(encoding.encoding),
                                   pycompat.sysstr(encoding.encodingmode))
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(pycompat.sysstr(encoding.encoding))
2396 2390
if (pyplatform.python_implementation() == 'CPython' and
    sys.version_info < (3, 0)):
    # There is an issue in CPython that some IO methods do not handle EINTR
    # correctly. The following table shows what CPython version (and functions)
    # are affected (buggy: has the EINTR bug, okay: otherwise):
    #
    #   | < 2.7.4 | 2.7.4 to 2.7.12 | >= 3.0
    #   --------------------------------------------------
    #    fp.__iter__ | buggy   | buggy           | okay
    #    fp.read*    | buggy   | okay [1]        | okay
    #
    # [1]: fixed by changeset 67dc99a989cd in the cpython hg repo.
    #
    # Here we workaround the EINTR issue for fileobj.__iter__. Other methods
    # like "read*" are ignored for now, as Python < 2.7.4 is a minority.
    #
    # Although we can workaround the EINTR issue for fp.__iter__, it is slower:
    # "for x in fp" is 4x faster than "for x in iter(fp.readline, '')" in
    # CPython 2, because CPython 2 maintains an internal readahead buffer for
    # fp.__iter__ but not other fp.read* methods.
    #
    # On modern systems like Linux, the "read" syscall cannot be interrupted
    # when reading "fast" files like on-disk files. So the EINTR issue only
    # affects things like pipes, sockets, ttys etc. We treat "normal" (S_ISREG)
    # files approximately as "fast" files and use the fast (unsafe) code path,
    # to minimize the performance impact.
    if sys.version_info >= (2, 7, 4):
        # fp.readline deals with EINTR correctly, use it as a workaround.
        def _safeiterfile(fp):
            return iter(fp.readline, '')
    else:
        # fp.read* are broken too, manually deal with EINTR in a stupid way.
        # note: this may block longer than necessary because of bufsize.
        def _safeiterfile(fp, bufsize=4096):
            fd = fp.fileno()
            line = ''
            while True:
                try:
                    buf = os.read(fd, bufsize)
                except OSError as ex:
                    # os.read only raises EINTR before any data is read
                    if ex.errno == errno.EINTR:
                        continue
                    else:
                        raise
                line += buf
                if '\n' in buf:
                    # emit every complete line; keep any trailing partial
                    # line in 'line' for the next read
                    splitted = line.splitlines(True)
                    line = ''
                    for l in splitted:
                        if l[-1] == '\n':
                            yield l
                        else:
                            line = l
                if not buf:
                    break
            if line:
                yield line

    def iterfile(fp):
        # on-disk regular files are treated as EINTR-free ("fast");
        # everything else (pipes, sockets, ttys) goes through the
        # EINTR-safe wrapper.  'file' is the Python 2 builtin file type.
        fastpath = True
        if type(fp) is file:
            fastpath = stat.S_ISREG(os.fstat(fp.fileno()).st_mode)
        if fastpath:
            return fp
        else:
            return _safeiterfile(fp)
else:
    # PyPy and CPython 3 do not have the EINTR issue thus no workaround needed.
    def iterfile(fp):
        return fp
2468 2462
def iterlines(iterator):
    """Yield each text line of every chunk in an iterable of chunks."""
    for chunk in iterator:
        # splitlines() strips the terminators and handles every newline
        # convention
        lines = chunk.splitlines()
        for line in lines:
            yield line
2473 2467
def expandpath(path):
    """Expand environment variables, then a leading ~user, in path."""
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2476 2470
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [encoding.environ['EXECUTABLEPATH']]
        else:
            # NOTE(review): presumably other freeze tools (e.g. py2exe)
            # where the running executable is hg itself -- confirm
            return [pycompat.sysexecutable]
    return gethgcmd()
2491 2485
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # record the (pid, status) of any child reaped while we poll
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # -1 if the child is gone and the condition still fails;
            # condfn() is re-checked to avoid a startup race
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2526 2520
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # drop the leading backslash: the unescaped character is what
            # appears in the matched text
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # map the doubled prefix to a literal prefix character; work on a
        # copy so the caller's mapping is not mutated
        mapping = dict(mapping)
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    # x.group()[1:] strips the single prefix character off the match
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2551 2545
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        num = int(port)
    except ValueError:
        # not numeric: fall through to the service name lookup below
        pass
    else:
        return num

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
2568 2562
# strings understood by parsebool() below, mapped to the boolean they denote
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2572 2566
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # unrecognized strings fall through to dict.get's default of None
    return _booleans.get(s.lower())
2579 2573
# map every two-hex-digit string (any case combination, since
# string.hexdigits contains both cases) to the character it denotes,
# e.g. '41' -> 'A'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in string.hexdigits for b in string.hexdigits)
2582 2576
2583 2577 class url(object):
2584 2578 r"""Reliable URL parser.
2585 2579
2586 2580 This parses URLs and provides attributes for the following
2587 2581 components:
2588 2582
2589 2583 <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>
2590 2584
2591 2585 Missing components are set to None. The only exception is
2592 2586 fragment, which is set to '' if present but empty.
2593 2587
2594 2588 If parsefragment is False, fragment is included in query. If
2595 2589 parsequery is False, query is included in path. If both are
2596 2590 False, both fragment and query are included in path.
2597 2591
2598 2592 See http://www.ietf.org/rfc/rfc2396.txt for more information.
2599 2593
2600 2594 Note that for backward compatibility reasons, bundle URLs do not
2601 2595 take host names. That means 'bundle://../' has a path of '../'.
2602 2596
2603 2597 Examples:
2604 2598
2605 2599 >>> url('http://www.ietf.org/rfc/rfc2396.txt')
2606 2600 <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
2607 2601 >>> url('ssh://[::1]:2200//home/joe/repo')
2608 2602 <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
2609 2603 >>> url('file:///home/joe/repo')
2610 2604 <url scheme: 'file', path: '/home/joe/repo'>
2611 2605 >>> url('file:///c:/temp/foo/')
2612 2606 <url scheme: 'file', path: 'c:/temp/foo/'>
2613 2607 >>> url('bundle:foo')
2614 2608 <url scheme: 'bundle', path: 'foo'>
2615 2609 >>> url('bundle://../foo')
2616 2610 <url scheme: 'bundle', path: '../foo'>
2617 2611 >>> url(r'c:\foo\bar')
2618 2612 <url path: 'c:\\foo\\bar'>
2619 2613 >>> url(r'\\blah\blah\blah')
2620 2614 <url path: '\\\\blah\\blah\\blah'>
2621 2615 >>> url(r'\\blah\blah\blah#baz')
2622 2616 <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
2623 2617 >>> url(r'file:///C:\users\me')
2624 2618 <url scheme: 'file', path: 'C:\\users\\me'>
2625 2619
2626 2620 Authentication credentials:
2627 2621
2628 2622 >>> url('ssh://joe:xyz@x/repo')
2629 2623 <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
2630 2624 >>> url('ssh://joe@x/repo')
2631 2625 <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>
2632 2626
2633 2627 Query strings and fragments:
2634 2628
2635 2629 >>> url('http://host/a?b#c')
2636 2630 <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
2637 2631 >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
2638 2632 <url scheme: 'http', host: 'host', path: 'a?b#c'>
2639 2633
2640 2634 Empty path:
2641 2635
2642 2636 >>> url('')
2643 2637 <url path: ''>
2644 2638 >>> url('#a')
2645 2639 <url path: '', fragment: 'a'>
2646 2640 >>> url('http://host/')
2647 2641 <url scheme: 'http', host: 'host', path: ''>
2648 2642 >>> url('http://host/#a')
2649 2643 <url scheme: 'http', host: 'host', path: '', fragment: 'a'>
2650 2644
2651 2645 Only scheme:
2652 2646
2653 2647 >>> url('http:')
2654 2648 <url scheme: 'http'>
2655 2649 """
2656 2650
2657 2651 _safechars = "!~*'()+"
2658 2652 _safepchars = "/!~*'()+:\\"
2659 2653 _matchscheme = remod.compile('^[a-zA-Z0-9+.\\-]+:').match
2660 2654
    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath: true while the path looks like a plain local path
        # rather than a scheme-qualified URL
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith('\\\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # a leading "scheme:" marks a real URL
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        # local paths need no further parsing
        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        # split user[:passwd]@host credentials off the authority
        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, urlreq.unquote(v))
2752 2746
2753 2747 def __repr__(self):
2754 2748 attrs = []
2755 2749 for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
2756 2750 'query', 'fragment'):
2757 2751 v = getattr(self, a)
2758 2752 if v is not None:
2759 2753 attrs.append('%s: %r' % (a, v))
2760 2754 return '<url %s>' % ', '.join(attrs)
2761 2755
    def __bytes__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            # Local paths (and bundle: paths) round-trip verbatim with no
            # quoting; only the scheme prefix and fragment are re-attached.
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            # Emit '//' even without an authority so absolute (and
            # drive-letter) paths are not parsed back as host names.
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                # Bracketed hosts (IPv6 literals) are emitted unquoted.
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    # str() is derived from __bytes__ via encoding.strmethod, so __bytes__
    # stays the single authoritative serializer.
    __str__ = encoding.strmethod(__bytes__)
2840 2834
2841 2835 def authinfo(self):
2842 2836 user, passwd = self.user, self.passwd
2843 2837 try:
2844 2838 self.user, self.passwd = None, None
2845 2839 s = bytes(self)
2846 2840 finally:
2847 2841 self.user, self.passwd = user, passwd
2848 2842 if not self.user:
2849 2843 return (s, None)
2850 2844 # authinfo[1] is passed to urllib2 password manager, and its
2851 2845 # URIs must not contain credentials. The host is passed in the
2852 2846 # URIs list because Python < 2.4.3 uses only that to search for
2853 2847 # a password.
2854 2848 return (s, (None, (s, self.host),
2855 2849 self.user, self.passwd or ''))
2856 2850
2857 2851 def isabs(self):
2858 2852 if self.scheme and self.scheme != 'file':
2859 2853 return True # remote URL
2860 2854 if hasdriveletter(self.path):
2861 2855 return True # absolute for our purposes - can't be joined()
2862 2856 if self.path.startswith(br'\\'):
2863 2857 return True # Windows UNC path
2864 2858 if self.path.startswith('/'):
2865 2859 return True # POSIX-style
2866 2860 return False
2867 2861
2868 2862 def localpath(self):
2869 2863 if self.scheme == 'file' or self.scheme == 'bundle':
2870 2864 path = self.path or '/'
2871 2865 # For Windows, we need to promote hosts containing drive
2872 2866 # letters to paths with drive letters.
2873 2867 if hasdriveletter(self._hostport):
2874 2868 path = self._hostport + '/' + self.path
2875 2869 elif (self.host is not None and self.path
2876 2870 and not hasdriveletter(path)):
2877 2871 path = '/' + path
2878 2872 return path
2879 2873 return self._origpath
2880 2874
2881 2875 def islocal(self):
2882 2876 '''whether localpath will return something that posixfile can open'''
2883 2877 return (not self.scheme or self.scheme == 'file'
2884 2878 or self.scheme == 'bundle')
2885 2879
def hasscheme(path):
    """Report whether ``path`` parses as a URL with an explicit scheme."""
    u = url(path)
    return bool(u.scheme)
2888 2882
def hasdriveletter(path):
    """Report whether ``path`` starts with a Windows drive letter (``x:``).

    Falsy inputs are returned unchanged, mirroring short-circuit ``and``.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2891 2885
def urllocalpath(path):
    """Return the local filesystem path for ``path``, leaving any query
    string and fragment text untouched."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2894 2888
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return bytes(parsed)
2901 2895
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    u.user = u.passwd = None
    # Return bytes like the sibling hidepassword() above: on Python 3,
    # str() would route through encoding.strmethod and yield a unicode
    # string where this module's URL helpers otherwise produce bytes.
    # (On Python 2 bytes is str, so this is behavior-preserving there.)
    return bytes(u)
2907 2901
# Pretty-printer for durations built from (factor, divisor, format)
# triples; presumably unitcountfn picks the first triple whose scaled
# value is large enough, so precision drops as magnitude grows
# (seconds down to nanoseconds) -- see unitcountfn's definition above.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# Current indentation (in spaces) of nested @timed reports; a one-element
# list so nested decorated calls can mutate it in place.
_timenesting = [0]
2925 2919
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        step = 2
        _timenesting[0] += step
        start = timer()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = timer() - start
            _timenesting[0] -= step
            # Indent by the remaining nesting level so nested timed
            # calls read like a call tree.
            stderr.write('%s%s: %s\n' %
                         (' ' * _timenesting[0], func.__name__,
                          timecount(elapsed)))
    return wrapper
2950 2944
# Suffix table for sizetoint(). Order matters: multi-character suffixes
# ('kb', 'mb', 'gb') must be tried before the bare 'b' fallback.
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    text = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if text.endswith(suffix):
                number = text[:-len(suffix)]
                return int(float(number) * multiplier)
        # No recognized suffix: treat the whole string as a byte count.
        return int(text)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2972 2966
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # List of (source, hookfn) pairs; kept unsorted until invocation.
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # Sort by source name just before calling so late registrations
        # still land in deterministic order.
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2990 2984
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s', depth=0):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries, then return the last 'depth' entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
        length of longest filepath+line number,
        filepath+linenumber,
        function

    Not be used in production code but very convenient while developing.
    '''
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append((fileline % (fn, ln), func))
    # Keep only the innermost 'depth' entries (all of them when depth=0).
    entries = entries[-depth:]
    if not entries:
        return
    fnmax = max(len(fnln) for fnln, _func in entries)
    for fnln, func in entries:
        if line is None:
            yield (fnmax, fnln, func)
        else:
            yield line % (fnmax, fnln, func)
3013 3007
def debugstacktrace(msg='stacktrace', skip=0,
                    f=stderr, otherf=stdout, depth=0):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' entries closest to the call, then show 'depth' entries.
    By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # Flush the other stream first so interleaved output stays ordered.
        otherf.flush()
    f.write('%s at:\n' % msg.rstrip())
    # skip + 1 also hides this helper's own frame from the trace.
    for frame in getstackframes(skip + 1, depth=depth):
        f.write(frame)
    f.flush()
3028 3022
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # Directory name -> number of entries referencing it.
        self._dirs = {}
        if skip is not None and safehasattr(map, 'iteritems'):
            # dirstate-style mapping: honor the skip state character.
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # All shallower ancestors are already counted; bump the
                # deepest known one and stop.
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # Still referenced by other entries; decrement and stop.
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return iter(self._dirs)

    def __contains__(self, d):
        return d in self._dirs
3064 3058
# Prefer the C implementation from the parsers extension module when it
# is present (it may be absent in pure-Python installs).
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
3067 3061
def finddirs(path):
    """Yield each ancestor directory of ``path``, deepest first."""
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
3073 3067
# compression code

# Role names used when querying wire-protocol compression support.
SERVERROLE = 'server'
CLIENTROLE = 'client'

# An engine's wire-protocol support declaration: its format name plus
# advertisement priorities for server and client (highest advertised first).
compewireprotosupport = collections.namedtuple(u'compenginewireprotosupport',
                                               (u'name', u'serverpriority',
                                                u'clientpriority'))
3082 3076
class compressormanager(object):
    """Holds registrations of various compression engines.

    This class essentially abstracts the differences between compression
    engines to allow new compression formats to be added easily, possibly from
    extensions.

    Compressors are registered against the global instance by calling its
    ``register()`` method.
    """
    def __init__(self):
        # Engine name to engine instance.
        self._engines = {}
        # Bundle spec human name to engine name.
        self._bundlenames = {}
        # Internal bundle identifier to engine name.
        self._bundletypes = {}
        # Revlog header to engine name.
        self._revlogheaders = {}
        # Wire proto identifier to engine name.
        self._wiretypes = {}

    def __getitem__(self, key):
        return self._engines[key]

    def __contains__(self, key):
        return key in self._engines

    def __iter__(self):
        return iter(self._engines.keys())

    def register(self, engine):
        """Register a compression engine with the manager.

        The argument must be a ``compressionengine`` instance.
        """
        if not isinstance(engine, compressionengine):
            raise ValueError(_('argument must be a compressionengine'))

        name = engine.name()
        if name in self._engines:
            raise error.Abort(_('compression engine %s already registered') %
                              name)

        bundleinfo = engine.bundletype()
        if bundleinfo:
            bundlename, bundletype = bundleinfo

            if bundlename in self._bundlenames:
                raise error.Abort(_('bundle name %s already registered') %
                                  bundlename)
            if bundletype in self._bundletypes:
                raise error.Abort(_('bundle type %s already registered by %s') %
                                  (bundletype, self._bundletypes[bundletype]))

            # No external facing name declared.
            if bundlename:
                self._bundlenames[bundlename] = name

            self._bundletypes[bundletype] = name

        wiresupport = engine.wireprotosupport()
        if wiresupport:
            wiretype = wiresupport.name
            if wiretype in self._wiretypes:
                raise error.Abort(_('wire protocol compression %s already '
                                    'registered by %s') %
                                  (wiretype, self._wiretypes[wiretype]))

            self._wiretypes[wiretype] = name

        revlogheader = engine.revlogheader()
        if revlogheader:
            if revlogheader in self._revlogheaders:
                raise error.Abort(
                    _('revlog header %s already registered by %s') %
                    (revlogheader, self._revlogheaders[revlogheader]))
            self._revlogheaders[revlogheader] = name

        self._engines[name] = engine

    @property
    def supportedbundlenames(self):
        return set(self._bundlenames)

    @property
    def supportedbundletypes(self):
        return set(self._bundletypes)

    def _checkavailable(self, engine):
        # Shared guard: a registered engine may still be unloadable (e.g.
        # when an optional C module is missing).
        if not engine.available():
            raise error.Abort(_('compression engine %s could not be loaded') %
                              engine.name())
        return engine

    def forbundlename(self, bundlename):
        """Obtain a compression engine registered to a bundle name.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundlenames[bundlename]]
        return self._checkavailable(engine)

    def forbundletype(self, bundletype):
        """Obtain a compression engine registered to a bundle type.

        Will raise KeyError if the bundle type isn't registered.

        Will abort if the engine is known but not available.
        """
        engine = self._engines[self._bundletypes[bundletype]]
        return self._checkavailable(engine)

    def supportedwireengines(self, role, onlyavailable=True):
        """Obtain compression engines that support the wire protocol.

        Returns a list of engines in prioritized order, most desired first.

        If ``onlyavailable`` is set, filter out engines that can't be
        loaded.
        """
        assert role in (SERVERROLE, CLIENTROLE)

        attr = 'serverpriority' if role == SERVERROLE else 'clientpriority'

        engines = [self._engines[e] for e in self._wiretypes.values()]
        if onlyavailable:
            engines = [e for e in engines if e.available()]

        def getkey(e):
            # Sort first by priority, highest first. In case of tie, sort
            # alphabetically. This is arbitrary, but ensures output is
            # stable.
            w = e.wireprotosupport()
            return -1 * getattr(w, attr), w.name

        return sorted(engines, key=getkey)

    def forwiretype(self, wiretype):
        engine = self._engines[self._wiretypes[wiretype]]
        return self._checkavailable(engine)

    def forrevlogheader(self, header):
        """Obtain a compression engine registered to a revlog header.

        Will raise KeyError if the revlog header value isn't registered.
        """
        return self._engines[self._revlogheaders[header]]
3236 3230
# The global manager instance that all compression engines register with.
compengines = compressormanager()
3238 3232
class compressionengine(object):
    """Base class for compression engines.

    Compression engines must implement the interface defined by this class.

    Instances are made known to Mercurial by passing them to
    ``compengines.register()``, which requires a ``compressionengine``
    subclass instance.
    """
    def name(self):
        """Returns the name of the compression engine.

        This is the key the engine is registered under.

        This method must be implemented.
        """
        raise NotImplementedError()

    def available(self):
        """Whether the compression engine is available.

        The intent of this method is to allow optional compression engines
        that may not be available in all installations (such as engines relying
        on C extensions that may not be present).
        """
        # Engines are assumed usable unless a subclass reports otherwise.
        return True

    def bundletype(self):
        """Describes bundle identifiers for this engine.

        If this compression engine isn't supported for bundles, returns None.

        If this engine can be used for bundles, returns a 2-tuple of strings of
        the user-facing "bundle spec" compression name and an internal
        identifier used to denote the compression format within bundles. To
        exclude the name from external usage, set the first element to ``None``.

        If bundle compression is supported, the class must also implement
        ``compressstream`` and `decompressorreader`.

        The docstring of this method is used in the help system to tell users
        about this engine.
        """
        return None

    def wireprotosupport(self):
        """Declare support for this compression format on the wire protocol.

        If this compression engine isn't supported for compressing wire
        protocol payloads, returns None.

        Otherwise, returns ``compenginewireprotosupport`` with the following
        fields:

        * String format identifier
        * Integer priority for the server
        * Integer priority for the client

        The integer priorities are used to order the advertisement of format
        support by server and client. The highest integer is advertised
        first. Integers with non-positive values aren't advertised.

        The priority values are somewhat arbitrary and only used for default
        ordering. The relative order can be changed via config options.

        If wire protocol compression is supported, the class must also implement
        ``compressstream`` and ``decompressorreader``.
        """
        return None

    def revlogheader(self):
        """Header added to revlog chunks that identifies this engine.

        If this engine can be used to compress revlogs, this method should
        return the bytes used to identify chunks compressed with this engine.
        Else, the method should return ``None`` to indicate it does not
        participate in revlog compression.
        """
        return None

    def compressstream(self, it, opts=None):
        """Compress an iterator of chunks.

        The method receives an iterator (ideally a generator) of chunks of
        bytes to be compressed. It returns an iterator (ideally a generator)
        of bytes of chunks representing the compressed output.

        Optionally accepts an argument defining how to perform compression.
        Each engine treats this argument differently.
        """
        raise NotImplementedError()

    def decompressorreader(self, fh):
        """Perform decompression on a file object.

        Argument is an object with a ``read(size)`` method that returns
        compressed data. Return value is an object with a ``read(size)`` that
        returns uncompressed data.
        """
        raise NotImplementedError()

    def revlogcompressor(self, opts=None):
        """Obtain an object that can be used to compress revlog entries.

        The object has a ``compress(data)`` method that compresses binary
        data. This method returns compressed binary data or ``None`` if
        the data could not be compressed (too small, not compressible, etc).
        The returned data should have a header uniquely identifying this
        compression format so decompression can be routed to this engine.
        This header should be identified by the ``revlogheader()`` return
        value.

        The object has a ``decompress(data)`` method that decompresses
        data. The method will only be called if ``data`` begins with
        ``revlogheader()``. The method should return the raw, uncompressed
        data or raise a ``RevlogError``.

        The object is reusable but is not thread safe.
        """
        raise NotImplementedError()
3355 3349
class _zlibengine(compressionengine):
    def name(self):
        return 'zlib'

    def bundletype(self):
        """zlib compression using the DEFLATE algorithm.

        All Mercurial clients should support this format. The compression
        algorithm strikes a reasonable balance between compression ratio
        and size.
        """
        return 'gzip', 'GZ'

    def wireprotosupport(self):
        return compewireprotosupport('zlib', 20, 20)

    def revlogheader(self):
        return 'x'

    def compressstream(self, it, opts=None):
        opts = opts or {}

        compobj = zlib.compressobj(opts.get('level', -1))
        for chunk in it:
            out = compobj.compress(chunk)
            # Not all calls to compress emit data. It is cheaper to inspect
            # here than to feed empty chunks through generator.
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = zlib.decompressobj()
            for chunk in filechunkiter(fh):
                while chunk:
                    # Limit output size to limit memory.
                    yield decomp.decompress(chunk, 2 ** 18)
                    chunk = decomp.unconsumed_tail

        return chunkbuffer(gen())

    class zlibrevlogcompressor(object):
        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 44:
                # Too small for compression to ever pay off.
                return None

            if insize <= 1000000:
                # One-shot compression for modest inputs.
                compressed = zlib.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None

            # zlib makes an internal copy of the input buffer, doubling
            # memory usage for large inputs. So do streaming compression
            # on large inputs.
            z = zlib.compressobj()
            parts = []
            pos = 0
            while pos < insize:
                nextpos = pos + 2**20
                parts.append(z.compress(data[pos:nextpos]))
                pos = nextpos
            parts.append(z.flush())

            if sum(map(len, parts)) < insize:
                return ''.join(parts)
            return None

        def decompress(self, data):
            try:
                return zlib.decompress(data)
            except zlib.error as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        return self.zlibrevlogcompressor()

compengines.register(_zlibengine())
3442 3436
class _bz2engine(compressionengine):
    def name(self):
        return 'bz2'

    def bundletype(self):
        """An algorithm that produces smaller bundles than ``gzip``.

        All Mercurial clients should support this format.

        This engine will likely produce smaller bundles than ``gzip`` but
        will be significantly slower, both during compression and
        decompression.

        If available, the ``zstd`` engine can yield similar or better
        compression at much higher speeds.
        """
        return 'bzip2', 'BZ'

    # We declare a protocol name but don't advertise by default because
    # it is slow.
    def wireprotosupport(self):
        return compewireprotosupport('bzip2', 0, 0)

    def compressstream(self, it, opts=None):
        opts = opts or {}
        compobj = bz2.BZ2Compressor(opts.get('level', 9))
        for chunk in it:
            out = compobj.compress(chunk)
            if out:
                yield out

        yield compobj.flush()

    def decompressorreader(self, fh):
        def gen():
            decomp = bz2.BZ2Decompressor()
            for chunk in filechunkiter(fh):
                yield decomp.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_bz2engine())
3485 3479
class _truncatedbz2engine(compressionengine):
    def name(self):
        return 'bz2truncated'

    def bundletype(self):
        # Internal-only identifier; no user-facing bundle spec name.
        return None, '_truncatedBZ'

    # We don't implement compressstream because it is hackily handled elsewhere.

    def decompressorreader(self, fh):
        def gen():
            # The input stream doesn't have the 'BZ' header. So add it back.
            decomp = bz2.BZ2Decompressor()
            decomp.decompress('BZ')
            for chunk in filechunkiter(fh):
                yield decomp.decompress(chunk)

        return chunkbuffer(gen())

compengines.register(_truncatedbz2engine())
3506 3500
class _noopengine(compressionengine):
    def name(self):
        return 'none'

    def bundletype(self):
        """No compression is performed.

        Use this compression engine to explicitly disable compression.
        """
        return 'none', 'UN'

    # Clients always support uncompressed payloads. Servers don't because
    # unless you are on a fast network, uncompressed payloads can easily
    # saturate your network pipe.
    def wireprotosupport(self):
        return compewireprotosupport('none', 0, 10)

    # We don't implement revlogheader because it is handled specially
    # in the revlog class.

    def compressstream(self, it, opts=None):
        # Chunks pass straight through.
        return it

    def decompressorreader(self, fh):
        # Nothing to decompress; hand back the raw file object.
        return fh

    class nooprevlogcompressor(object):
        def compress(self, data):
            # Never claims to have compressed anything.
            return None

    def revlogcompressor(self, opts=None):
        return self.nooprevlogcompressor()

compengines.register(_noopengine())
3541 3535
class _zstdengine(compressionengine):
    def name(self):
        return 'zstd'

    @propertycache
    def _module(self):
        # Not all installs have the zstd module available. So defer importing
        # until first access.
        try:
            from . import zstd
            # Force delayed import.
            zstd.__version__
            return zstd
        except ImportError:
            return None

    def available(self):
        # Only usable when the bundled zstd module imported successfully.
        return bool(self._module)

    def bundletype(self):
        """A modern compression algorithm that is fast and highly flexible.

        Only supported by Mercurial 4.1 and newer clients.

        With the default settings, zstd compression is both faster and yields
        better compression than ``gzip``. It also frequently yields better
        compression than ``bzip2`` while operating at much higher speeds.

        If this engine is available and backwards compatibility is not a
        concern, it is likely the best available engine.
        """
        return 'zstd', 'ZS'

    def wireprotosupport(self):
        return compewireprotosupport('zstd', 50, 50)

    def revlogheader(self):
        # '\x28' is the first byte of the zstd frame magic number
        # (0xFD2FB528 little-endian).
        return '\x28'

    def compressstream(self, it, opts=None):
        opts = opts or {}
        # zstd level 3 is almost always significantly faster than zlib
        # while providing no worse compression. It strikes a good balance
        # between speed and compression.
        level = opts.get('level', 3)

        zstd = self._module
        z = zstd.ZstdCompressor(level=level).compressobj()
        for chunk in it:
            data = z.compress(chunk)
            # compress() may buffer and emit nothing for a given chunk.
            if data:
                yield data

        yield z.flush()

    def decompressorreader(self, fh):
        zstd = self._module
        dctx = zstd.ZstdDecompressor()
        return chunkbuffer(dctx.read_from(fh))

    class zstdrevlogcompressor(object):
        def __init__(self, zstd, level=3):
            # Writing the content size adds a few bytes to the output. However,
            # it allows decompression to be more optimal since we can
            # pre-allocate a buffer to hold the result.
            self._cctx = zstd.ZstdCompressor(level=level,
                                             write_content_size=True)
            self._dctx = zstd.ZstdDecompressor()
            self._compinsize = zstd.COMPRESSION_RECOMMENDED_INPUT_SIZE
            self._decompinsize = zstd.DECOMPRESSION_RECOMMENDED_INPUT_SIZE

        def compress(self, data):
            insize = len(data)
            # Caller handles empty input case.
            assert insize > 0

            if insize < 50:
                # Too small for compression to be worth the header overhead.
                return None

            elif insize <= 1000000:
                # One-shot compression for modest inputs.
                compressed = self._cctx.compress(data)
                if len(compressed) < insize:
                    return compressed
                return None
            else:
                # Stream large inputs through the compressor in
                # recommended-size slices to bound memory usage.
                z = self._cctx.compressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._compinsize
                    chunk = z.compress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                chunks.append(z.flush())

                # Only return compressed output when it actually saved space.
                if sum(map(len, chunks)) < insize:
                    return ''.join(chunks)
                return None

        def decompress(self, data):
            insize = len(data)

            try:
                # This was measured to be faster than other streaming
                # decompressors.
                dobj = self._dctx.decompressobj()
                chunks = []
                pos = 0
                while pos < insize:
                    pos2 = pos + self._decompinsize
                    chunk = dobj.decompress(data[pos:pos2])
                    if chunk:
                        chunks.append(chunk)
                    pos = pos2
                # Frame should be exhausted, so no finish() API.

                return ''.join(chunks)
            except Exception as e:
                raise error.RevlogError(_('revlog decompress error: %s') %
                                        str(e))

    def revlogcompressor(self, opts=None):
        opts = opts or {}
        return self.zstdrevlogcompressor(self._module,
                                         level=opts.get('level', 3))

compengines.register(_zstdengine())
3670 3664
def bundlecompressiontopics():
    """Obtains a list of available bundle compressions for use in help."""
    # help.makeitemsdocs() expects a dict of names to items with a .__doc__.
    topics = {}

    # We need to format the docstring. So use a dummy object/type to hold it
    # rather than mutating the original.
    class docobject(object):
        pass

    for name in compengines:
        engine = compengines[name]

        # Only advertise engines that are actually usable...
        if not engine.available():
            continue

        # ...and that participate in bundle compression.
        bundleinfo = engine.bundletype()
        if not (bundleinfo and bundleinfo[0]):
            continue

        docstring = pycompat.sysstr('``%s``\n %s') % (
            bundleinfo[0], engine.bundletype.__doc__)

        holder = docobject()
        holder.__doc__ = docstring
        topics[bundleinfo[0]] = holder

    return topics
3700 3694
# Convenient shortcut: ``dst`` aliases debugstacktrace for quick use while
# debugging.
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now