##// END OF EJS Templates
util: make hashlib import unconditional...
Gregory Szorc -
r27357:7f5a0bd4 default
parent child Browse files
Show More
@@ -1,2489 +1,2484
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 import i18n
17 17 _ = i18n._
18 18 import error, osutil, encoding, parsers
19 19 import errno, shutil, sys, tempfile, traceback
20 20 import re as remod
21 21 import os, time, datetime, calendar, textwrap, signal, collections
22 22 import imp, socket, urllib
23 23 import gc
24 24 import bz2
25 25 import zlib
26 import hashlib
26 27
27 28 if os.name == 'nt':
28 29 import windows as platform
29 30 else:
30 31 import posix as platform
31 32
33 md5 = hashlib.md5
34 sha1 = hashlib.sha1
35 sha512 = hashlib.sha512
36
# Re-export the platform-specific implementations (windows.py or posix.py,
# selected by the os.name check above) under stable module-level names so
# callers can use util.<name> without caring about the host OS.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it provides statfiles
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
85 90
# Python compatibility

# unique sentinel used by safehasattr() below to detect "attribute absent";
# None cannot serve because None may be a legitimate attribute value
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
94 99
def safehasattr(thing, attr):
    """Return True when ``thing`` has an attribute named ``attr``."""
    found = getattr(thing, attr, _notset)
    return found is not _notset
97 102
98 from hashlib import md5, sha1
99
# Mapping of digest name -> constructor. hashlib has shipped with the
# standard library since Python 2.5, so md5/sha1/sha512 (aliased from
# hashlib near the top of this module) are always available; the diff
# residue that both redefined DIGESTS_BY_STRENGTH and re-imported hashlib
# under a try/except guard is gone.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every advertised digest must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
118 113
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed data into every tracked digest
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # previously raised with undefined name 'k' (NameError); use the
            # actual lookup key in the error message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
165 160
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

        d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh                      # wrapped file handle
        self._size = size                  # expected total byte count
        self._got = 0                      # bytes read so far
        self._digests = dict(digests)      # name -> expected hex digest
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # read from the underlying handle while feeding the digester
        content = self._fh.read(length)
        self._digester.update(content)
        self._got += len(content)
        return content

    def validate(self):
        # call after reading everything; raises Abort on any mismatch
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
197 192
# Python 3 dropped the builtin 'buffer'; provide a uniform helper that
# returns a view/slice of a sliceable object starting at 'offset'.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # Python 2 without builtin buffer: fall back to a copy slice
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # Python 3: memoryview gives a zero-copy view
            return memoryview(sliceable)[offset:]
207 202
import subprocess
# close inherited file descriptors in child processes where POSIX allows it
closefds = os.name == 'posix'

# default read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
212 207
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input   # underlying file-like object
        self._buffer = []     # buffered chunks, oldest first
        self._eof = False     # set once os.read returns no data
        self._lenbuf = 0      # total length of all buffered chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until 'size' bytes are buffered or the stream ends
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1  # index of the newline within the newest chunk, if any
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed tail as the single remaining chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
306 301
def popen2(cmd, env=None, newlines=False):
    """Run cmd in a shell; return its (stdin, stdout) pipe pair."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    p = subprocess.Popen(cmd, shell=True, bufsize=-1,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout
317 312
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but expose only the three standard streams."""
    pipes = popen4(cmd, env, newlines)
    # drop the trailing Popen object, keep (stdin, stdout, stderr)
    return pipes[0], pipes[1], pipes[2]
321 316
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd in a shell; return (stdin, stdout, stderr, Popen object)."""
    p = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                         close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         universal_newlines=newlines,
                         env=env)
    return p.stdin, p.stdout, p.stderr, p
330 325
def version():
    """Return version information if available."""
    try:
        import __version__
    except ImportError:
        # no generated version module (e.g. running from a raw checkout)
        return 'unknown'
    return __version__.version
338 333
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the "+extra" suffix at the first '+', if present
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    # collect leading dot-separated numeric components; stop at the first
    # non-numeric part
    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # pad to three components, e.g. (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
391 386
# used by parsedate: formats tried in order when parsing a user-supplied
# date string (ISO-like forms first, then US-style and ctime-like forms,
# finally bare times)
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)
419 414
# superset of defaultdateformats that also accepts coarser specifications
# (bare year, year-month, bare month) for use in date range queries
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
426 421
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # use __code__ (available on Python 2.6+ and 3.x) instead of the
    # Python-2-only func_code attribute
    argcount = func.__code__.co_argcount
    if argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
452 447
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # propagate dict.pop's return value; it was previously dropped,
        # silently violating the dict.pop contract
        ret = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            pass
        return ret
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
497 492
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''
    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        self._order = collections.deque()

    def __getitem__(self, key):
        # a hit refreshes the key's recency
        value = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return value

    def __setitem__(self, key, value):
        if key in self._cache:
            # existing key: just refresh its recency
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # new key at capacity: evict the least recently used entry
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
526 521
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # use __code__ (available on Python 2.6+ and 3.x) instead of the
    # Python-2-only func_code attribute
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    # evict the least recently used result
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
553 548
class propertycache(object):
    """Descriptor that computes an attribute once and stores it on the
    instance, so later lookups bypass the descriptor entirely."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
566 561
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # feed S on the child's stdin and capture its stdout; stderr is
    # inherited from this process
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    pout, perr = p.communicate(s)
    return pout
573 568
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temporary input file
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # create (and immediately close) the output file for the command
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
610 605
# maps a filter-spec prefix to the function implementing that filter style
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
615 610
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a recognized prefix ('tempfile:' or 'pipe:'); anything
    # else is treated as a plain shell pipe
    for prefix, fn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
622 617
def binary(s):
    """return true if a string is binary data"""
    # heuristic: any embedded NUL byte marks the content as binary
    if not s:
        return False
    return '\0' in s
626 621
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); returns 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    buf = []
    blen = 0
    for chunk in source:
        buf.append(chunk)
        blen += len(chunk)
        if blen >= min:
            if min < max:
                # grow the threshold: at least double it, but jump straight
                # to the largest power of two not exceeding the size we just
                # accumulated, capped at max
                min = min << 1
                nmin = 1 << log2(blen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(buf)
            blen = 0
            buf = []
    if buf:
        # flush whatever remains, even if smaller than min
        yield ''.join(buf)
657 652
# convenience alias so util users do not need to import 'error' directly
Abort = error.Abort
def always(fn):
    """Matcher predicate that accepts every file."""
    return True
662 657
def never(fn):
    """Matcher predicate that rejects every file."""
    return False
665 660
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable when the collector was running beforehand
            if wasenabled:
                gc.enable()
    return wrapper
687 682
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists, so
            # return n2 anchored at root instead
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
713 708
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
723 718
# the location of data files matching the source code
if mainfrozen():
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
732 727
# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, falling back to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
753 748
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
758 753
759 754 def _isstdout(f):
760 755 fileno = getattr(f, 'fileno', None)
761 756 return fileno and fileno() == sys.__stdout__.fileno()
762 757
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # merge the caller's environment on top of ours, shell-stringified
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # no redirection requested: let the child write directly
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # forward the child's stdout+stderr line by line into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
821 816
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # translate the TypeError into SignatureError only when it came
            # from the call itself (traceback depth 1), not from code
            # running inside func
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise

    return check
833 828
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # symlinks are recreated pointing at the same target, not followed
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
854 849
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # hardlinking only works within a single filesystem
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the child's progress by files already handled here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed once: stop trying for the rest
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
891 886
# names and characters Windows refuses to allow in filenames
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, treating '\' and '/' as separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are invalid in Windows filenames
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # only the part before the first dot counts for reserved names
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # trailing dot or space is rejected, except for '.' and '..'
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
942 937
if os.name == 'nt':
    # on Windows, enforce the Windows filename restrictions everywhere
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
947 942
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    Prefers an atomic symlink whose target encodes the lock info; falls
    back to an exclusively-created regular file when symlinks are not
    available. An already-held lock (EEXIST) always propagates.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall through to the regular-file scheme
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
960 955
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink, otherwise the
    plain file's contents.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # not a symlink (or symlinks unsupported): read as a plain file
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
973 968
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no fileno(): fall back to stat'ing by name
        return os.stat(fp.name)
    return os.fstat(fd)
980 975
981 976 # File system features
982 977
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st = os.lstat(path)
    dirname, base = os.path.split(path)
    # build a case-flipped sibling name to probe with
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # flipped spelling does not exist: filesystem distinguishes case
        return True
    # identical stat results mean both spellings name the same file
    if st2 == st:
        return False
    return True
1005 1000
1006 1001 try:
1007 1002 import re2
1008 1003 _re2 = None
1009 1004 except ImportError:
1010 1005 _re2 = False
1011 1006
class _re(object):
    """Dispatcher between the stdlib re module and the optional re2 binding.

    Availability of a *working* re2 is probed lazily on first use and the
    verdict is cached in the module-level _re2 flag (None = unknown,
    False = unusable, True = usable).
    """
    def _checkre2(self):
        # probe re2 once and record the result in the module-level flag
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton through which callers access compile()/escape
re = _re()
1056 1051
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcase-ed entry name -> actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the old code discarded the result,
    # so a backslash separator was never escaped inside the character class.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1099 1094
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # leftover probe file: assume the answer is "no" rather than clobber
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # hardlinks unsupported (or probe failed): report "no"
        return False
    finally:
        if fd is not None:
            fd.close()
        # best-effort cleanup of both probe files
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1131 1126
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # altsep is None on POSIX; callers only rely on truthiness
    return os.altsep and path.endswith(os.altsep)
1135 1130
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately not handled: this is meant to be a
    readable spelling of a plain "path.split(os.sep)".  Run
    os.path.normpath() on the path first if separator normalization
    is needed.'''
    return path.split(os.sep)
1143 1138
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # elsewhere: Windows always has a GUI, X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1158 1153
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # the temp file lives in the same directory as name so a later
    # rename() onto name stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy: the empty temp file is the answer
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1197 1192
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode means the caller will overwrite everything, so the
        # copy of the original content can be skipped (emptyok)
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: flush-by-close then atomically rename over the target
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp file, leaving the original untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1235 1230
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    mode, when given, is applied via os.chmod to each directory this
    call creates; notindexed is forwarded to platform.makedir (used on
    Windows to mark new directories as not-to-be-indexed).
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there: nothing to do (and no chmod)
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        # create missing ancestors, then retry this level
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1252 1247
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create ancestors first, then this level (opposite of makedirs,
    # which retries after failure)
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1274 1269
def readfile(path):
    """Return the entire content of the file at path as a byte string."""
    with open(path, 'rb') as fp:
        return fp.read()
1281 1276
def writefile(path, text):
    """Write text to the file at path, replacing any existing content."""
    with open(path, 'wb') as fp:
        fp.write(text)
1288 1283
def appendfile(path, text):
    """Append text to the file at path, creating it if missing."""
    with open(path, 'ab') as fp:
        fp.write(text)
1295 1290
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-chunk anything over 1MB into 256KB pieces so a single
            # huge chunk never dominates memory use
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return what we have (short read)
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, which terminates the loop
                left -= chunkremaining

        return ''.join(buf)
1376 1371
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    remaining = limit
    while True:
        nbytes = size if remaining is None else min(remaining, size)
        # nbytes == 0 (limit hit) short-circuits without touching f
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if remaining:
            remaining -= len(chunk)
        yield chunk
1397 1392
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        raise Abort(_("negative timestamp: %d") % timestamp,
                    hint=_("check your clock"))
    # offset = UTC - local; positive when local time is west of UTC
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - local
    return timestamp, delta.days * 86400 + delta.seconds
1410 1405
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, tz = date or makedate()
    if when < 0:
        # time.gmtime(lt) fails on Windows for lt < -43200
        when, tz = 0, 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1/%2 expand to the hour/minute halves of a +-HHMM zone string
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        tm = time.gmtime(float(when) - tz)
    except ValueError:
        # time was out of range
        tm = time.gmtime(sys.maxint)
    return time.strftime(format, tm)
1434 1429
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1438 1433
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts +-HHMM forms and the literals GMT/UTC; returns None for
    anything else.  The returned offset follows makedate()'s sign
    convention (positive = west of UTC), hence the negation.
    """
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 3600 + minutes * 60)
    if tz in ("GMT", "UTC"):
        return 0
    return None
1449 1444
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): the ``defaults=[]`` default is misleading -- the body
    # indexes it like a mapping (defaults[part][usenow]), and parsedate()
    # always passes a dict; the list default would crash if ever used.
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was a timezone; parse the rest without it
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1479 1474
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, matched both in English and the user's locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # internal "unixtime offset" representation
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each configured format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1558 1553
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp a partial date could mean (round unknowns down)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp a partial date could mean (round unknowns up);
        # try month lengths 31/30/29 before settling on 28
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anything within its span of uncertainty
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1634 1629
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = regexp and remod.compile(regexp) or remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # anything else (including unknown prefixes) is an exact match
    return 'literal', pattern, pattern.__eq__
1673 1668
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then any "Real Name <" prefix, then cut at
    # the first space or dot -- order matters and mirrors hg's historic
    # behavior
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]
    for cutchar in (' ', '.'):
        idx = user.find(cutchar)
        if idx >= 0:
            user = user[:idx]
    return user
1689 1684
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then anything up to and including the first '<'
    user = user.partition('@')[0]
    head, sep, tail = user.partition('<')
    if sep:
        user = tail
    return user
1699 1694
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with no brackets the whole
    # string is assumed to already be the address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1706 1701
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    '...' is appended when the text is trimmed; width is measured by
    encoding.trim, not in raw bytes.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
1710 1705
def unitcountfn(*unittable):
    """return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) triples
    ordered from largest to smallest unit; the first entry whose
    threshold (multiplier * divisor) the value reaches is used, and the
    final entry's format is the fallback for small values.
    """
    def go(count):
        for threshold_mult, divisor, fmt in unittable:
            if count >= divisor * threshold_mult:
                return fmt % (count / float(divisor))
        # below every threshold: render with the smallest unit
        return unittable[-1][2] % count

    return go
1721 1716
# render a byte count as a human-readable string, choosing the largest
# unit (GB/MB/KB/bytes) whose threshold the value reaches, with the
# number of decimals shrinking as the value grows
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1734 1729
def uirepr(s):
    """repr() of s with doubled backslashes collapsed.

    Avoids the double backslash that repr() produces for Windows paths.
    """
    rendered = repr(s)
    return rendered.replace('\\\\', '\\')
1738 1733
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # NOTE: on first call this function defines the wrapper class and then
    # rebinds the module-level name MBTextWrapper to that class, so the
    # class body is only built once.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so the first part fits in space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1842 1837
def wrap(line, width, initindent='', hangindent=''):
    # Width-aware fill of a locally-encoded byte string: decode to
    # unicode so MBTextWrapper can measure display columns, then
    # re-encode the result.
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1855 1850
def iterlines(iterator):
    """Yield the individual lines contained in each chunk of iterator."""
    for block in iterator:
        for text in block.splitlines():
            yield text
1860 1855
def expandpath(path):
    """Expand environment variables, then a leading ~, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1863 1858
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen (py2exe-style) build: the executable *is* hg
        return [sys.executable]
    return gethgcmd()
1874 1869
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its pid so the poll loop notices
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn after detecting death to avoid a race where
            # the child satisfied the condition just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1909 1904
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    The caller's mapping is never modified. Note that the mapping keys
    are joined into a regular expression unescaped, so they must be
    regex-safe.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            # strip the leading backslash of a regex-escaped prefix
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the caller's mapping is not mutated
        # (previously the prefix key was inserted into the caller's dict)
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1934 1929
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a plain number; fall through to a service-name lookup
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        raise Abort(_("no port number associated with service '%s'") % port)
1951 1946
# recognized spellings of boolean config values (lowercase only; callers
# are expected to lower() before lookup, as parsebool does)
_booleans = {
    '1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
    '0': False, 'no': False, 'false': False, 'off': False,
    'never': False,
}

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1962 1957
1963 1958 _hexdig = '0123456789ABCDEFabcdef'
1964 1959 _hextochr = dict((a + b, chr(int(a + b, 16)))
1965 1960 for a in _hexdig for b in _hexdig)
1966 1961
1967 1962 def _urlunquote(s):
1968 1963 """Decode HTTP/HTML % encoding.
1969 1964
1970 1965 >>> _urlunquote('abc%20def')
1971 1966 'abc def'
1972 1967 """
1973 1968 res = s.split('%')
1974 1969 # fastpath
1975 1970 if len(res) == 1:
1976 1971 return s
1977 1972 s = res[0]
1978 1973 for item in res[1:]:
1979 1974 try:
1980 1975 s += _hextochr[item[:2]] + item[2:]
1981 1976 except KeyError:
1982 1977 s += '%' + item
1983 1978 except UnicodeDecodeError:
1984 1979 s += unichr(int(item[:2], 16)) + item[2:]
1985 1980 return s
1986 1981
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    # matches a leading "scheme:" per RFC 2396
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: this is a plain local path, nothing more
                # to parse
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                # rsplit so user names may themselves contain '@'
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-sans-credentials, urllib2-authinfo-or-None)."""
        user, passwd = self.user, self.passwd
        try:
            # temporarily strip credentials to render the bare URL
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """True if this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path this URL refers to.

        For non-file, non-bundle URLs the original unparsed string is
        returned unchanged.
        """
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2273 2268
def hasscheme(path):
    """True if path parses as a URL with a scheme component."""
    u = url(path)
    return bool(u.scheme)
2276 2271
def hasdriveletter(path):
    """Truthy if path starts with a Windows drive letter ('c:...').

    Returns a falsy value (not necessarily False) for empty paths, so
    callers must test truthiness rather than identity.
    """
    return path and path[0:1].isalpha() and path[1:2] == ':'
2279 2274
def urllocalpath(path):
    """Return the local filesystem path for path, parsed as a URL.

    Query string and fragment are treated as part of the path.
    """
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2282 2277
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the URL shape but mask the secret
        parsed.passwd = '***'
    return str(parsed)
2289 2284
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2295 2290
def isatty(fd):
    """Best-effort check whether fd is attached to a terminal.

    Objects without an isatty() method are treated as non-terminals.
    """
    try:
        return fd.isatty()
    except AttributeError:
        # fd does not implement isatty(); assume it is not a tty
        return False
2301 2296
# render a duration (in seconds) as a human-readable string; each triple
# is presumably (threshold, divisor, format) consumed by unitcountfn,
# listed from seconds down to nanoseconds -- TODO confirm against
# unitcountfn's definition
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2317 2312
# one-element list holding the current indentation (in spaces) of nested
# @timed calls; a list so the closure below can mutate it in place
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def inner(*args, **kwargs):
        indent = 2
        start = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            # pop our nesting level before printing so the report is
            # indented at the caller's depth
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return inner
2344 2339
# suffix -> multiplier pairs tried in order; longer suffixes appear
# after their one-letter forms but still match first because endswith()
# on the short form fails for them (e.g. 'kb' does not end with 'k')
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no recognized suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2366 2361
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, callable) pairs; kept unsorted until called
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # order by source name so hook execution is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2384 2379
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop our own frame plus 'skip' caller frames from the stack
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for filename, lineno, funcname, _line in frames:
        entries.append(('%s:%s' % (filename, lineno), funcname))
    if entries:
        width = max(len(location) for location, _fn in entries)
        for location, funcname in entries:
            f.write(' %-*s in %s\n' % (width, location, funcname))
    f.flush()
2401 2396
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # Count the ancestor directories of every path in map. For a
        # dirstate-like mapping with skip given, entries whose state
        # byte equals skip are left out.
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    self.addpath(f)
        else:
            for f in map:
                self.addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # this directory is already tracked; only its count
                # needs bumping, so stop walking toward the root
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # still referenced by other paths; decrement and stop
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2437 2432
# prefer the parsers module's dirs implementation when it provides one,
# replacing the pure Python class above
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2440 2435
def finddirs(path):
    """Yield every ancestor directory of path, deepest first.

    A path with no '/' yields nothing.
    """
    cut = path.rfind('/')
    while cut != -1:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2446 2441
2447 2442 # compression utility
2448 2443
class nocompress(object):
    """Pass-through 'compressor' used when no compression is wanted."""

    def compress(self, x):
        # identity: hand the chunk back untouched
        return x

    def flush(self):
        # nothing is buffered, so nothing remains to emit
        return ""
2454 2449
# map of compression type identifiers to compressor factories; None
# (and its legacy alias 'UN') means no compression
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2463 2458
def _makedecompressor(decompcls):
    """Return a function mapping a file object to a chunkbuffer of its
    contents decompressed with a decompcls() instance."""
    def gen(fileobj):
        decompressor = decompcls()
        for chunk in filechunkiter(fileobj):
            yield decompressor.decompress(chunk)

    def reader(fh):
        return chunkbuffer(gen(fh))
    return reader
2472 2467
def _bz2():
    """Return a BZ2 decompressor primed with the stripped 'BZ' magic."""
    decomp = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    decomp.decompress('BZ')
    return decomp
2479 2474
# map of compression type identifiers to decompressing readers; each
# value takes a file-like object and returns a chunked reader over the
# decompressed data (None/'UN' pass the file object through untouched)
decompressors = {None: lambda fh: fh,
                 # '_truncatedBZ' handles streams whose 'BZ' magic was
                 # stripped (see _bz2 above)
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2487 2482
# convenient shortcut for interactive/debugging use: util.dst(...)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now