##// END OF EJS Templates
util: refactor getstackframes
timeless -
r28497:906fece8 default
parent child Browse files
Show More
@@ -1,2717 +1,2735 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import urllib
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 46 )
47 47
# Select the platform-specific implementation module once at import time;
# everything below re-exports names from it.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Stable util.* names for the platform-specific implementations, so the rest
# of the codebase never imports windows/posix directly.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer osutil.statfiles when that module provides one
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# unique sentinel object: distinguishes "no value supplied" from None
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
121 121
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute `attr`.

    Implemented with getattr() and a private sentinel rather than hasattr().
    """
    probed = getattr(thing, attr, _notset)
    return probed is not _notset
124 124
# digest name -> hash constructor
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every entry in the strength list must have a constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
135 135
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed more data to every configured hash."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: report 'key', the argument being looked up; the previous
            # code formatted the stale module-level loop variable 'k' into
            # the message, naming the wrong digest type.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
182 182
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless the observed size and all digests match."""
        expected, received = self._size, self._got
        if expected != received:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (expected, received))
        for name, wanted in self._digests.items():
            actual = self._digester[name]
            if wanted != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, wanted, actual))
214 214
# rebind the 'buffer' builtin if it exists; otherwise install a shim that
# returns a from-offset view (plain slice on py2, memoryview slice on py3)
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# close_fds for the subprocess calls below is only enabled on POSIX
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
228 228
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input
        # list of pending chunks; collapsed into a single string on demand
        self._buffer = []
        self._eof = False
        # total number of currently buffered bytes
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until 'size' bytes are available or the pipe is exhausted
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the last chunk, -1 when absent
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        # keep filling until a newline appears or we hit EOF
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so the slicing below sees one string
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed remainder as a single collapsed chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
322 322
def popen2(cmd, env=None, newlines=False):
    """Spawn `cmd` through the shell; return its (stdin, stdout) pipes.

    bufsize=-1 requests the system default buffer size: the historical
    default of 0 (unbuffered) performs poorly on Mac OS X
    (http://bugs.python.org/issue4194).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
333 333
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only (stdin, stdout, stderr), dropping the
    process object."""
    return popen4(cmd, env, newlines)[:3]
337 337
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Spawn `cmd` through the shell with stdin/stdout/stderr all piped.

    Returns (stdin, stdout, stderr, process).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
346 346
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
354 354
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off the "+extra" suffix, keeping any later '+' inside the extra
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad to three numeric slots: (3, 6) -> (3, 6, None)
    vints.extend([None] * (3 - len(vints)))

    full = (vints[0], vints[1], vints[2], extra)
    if n in (2, 3, 4):
        return full[:n]
407 407
# used by parsedate
# formats are tried in order; most precise forms first
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# coarser forms (year or month granularity) accepted in addition to the above
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
442 442
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # use func.__code__ (available on Python 2.6+ and Python 3) instead of
    # the py2-only func.func_code alias
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
468 468
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves it
    to the end of the order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-inserting an existing key moves it to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value like dict.pop() does; previously the
        # value was computed and then silently discarded (always None)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned by dict.pop
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
513 513
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.prev = None
        self.next = None
        # _notset marks a slot that currently holds no dictionary entry
        self.value = None
        self.key = _notset

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
532 532
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` when absent.

        Fixed to unwrap the internal linked-list node: the previous
        implementation returned the ``_lrucachenode`` wrapper itself,
        leaking an implementation detail to callers.
        """
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
691 691
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    cache = {}
    order = collections.deque()
    # use func.__code__ (available on Python 2.6+ and Python 3) instead of
    # the py2-only func.func_code alias
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                # evict the least recently used entry beyond 20 entries
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
718 718
class propertycache(object):
    """Non-data descriptor that caches the wrapped function's result.

    On first access the wrapped function runs once and its result is stored
    in the instance __dict__ under the same name, so later lookups find the
    cached value before ever reaching this descriptor.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
731 731
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, stderr = proc.communicate(s)
    return stdout
738 738
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname = outname = None
    try:
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        infile = os.fdopen(infd, 'wb')
        infile.write(s)
        infile.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temporary files
        for name in (inname, outname):
            try:
                if name:
                    os.unlink(name)
            except OSError:
                pass
772 772
# maps a command prefix to the filter strategy used by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
777 777
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, handler in filtertable.iteritems():
        if cmd.startswith(prefix):
            rest = cmd[len(prefix):].lstrip()
            return handler(s, rest)
    # no recognized prefix: default to the pipe strategy
    return pipefilter(s, cmd)
784 784
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
788 788
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for piece in source:
        pending.append(piece)
        pendinglen += len(piece)
        if pendinglen < min:
            continue
        if min < max:
            # grow the threshold: double it, or jump to the largest power
            # of two not exceeding what actually accumulated, capped at max
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
819 819
# convenience alias so util users don't have to import 'error' themselves
Abort = error.Abort
821 821
def always(fn):
    """Predicate that returns True regardless of its argument."""
    return True
824 824
def never(fn):
    """Predicate that returns False regardless of its argument."""
    return False
827 827
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on in the first place
            if wasenabled:
                gc.enable()
    return wrapper
849 849
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, anchor at root
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    fromparts = splitpath(n1)
    toparts = n2.split('/')
    fromparts.reverse()
    toparts.reverse()
    # drop the common leading components (compared from the reversed tails)
    while fromparts and toparts and fromparts[-1] == toparts[-1]:
        fromparts.pop()
        toparts.pop()
    toparts.reverse()
    climb = ['..'] * len(fromparts)
    return os.sep.join(climb + toparts) or '.'
875 875
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):     # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")   # tools/freeze
885 885
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None
896 896
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is not None:
        return _hgexecutable
    envhg = os.environ.get('HG')
    if envhg:
        _sethgexecutable(envhg)
    elif mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            _sethgexecutable(os.environ['EXECUTABLEPATH'])
        else:
            _sethgexecutable(sys.executable)
    else:
        mainmod = sys.modules['__main__']
        if os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            _sethgexecutable(findexe('hg') or os.path.basename(sys.argv[0]))
    return _hgexecutable
919 919
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # module-level cache read back by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
924 924
def _isstdout(f):
    """Return a truthy value when file object `f` refers to the process's
    real stdout (compared by file descriptor)."""
    getfd = getattr(f, 'fileno', None)
    if not getfd:
        # preserve the original falsy return (the missing attribute itself)
        return getfd
    return getfd() == sys.__stdout__.fileno()
928 928
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # flush our own stdout so child output doesn't interleave badly
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # merge caller-supplied environment (shell-stringified) into ours
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # out is a non-stdout sink: funnel combined stdout+stderr into it
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # on OpenVMS, return codes with the low bit set are treated as success
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
987 987
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a one-frame traceback means the TypeError came from the call
            # itself (bad argument list), not from inside func's body
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise
    return check
999 999
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink itself rather than copying its target;
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            # copystat copies the mode along with the other stat info;
            # otherwise copy only the permission bits
            statcopier = shutil.copystat if copystat else shutil.copymode
            statcopier(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1027 1027
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was still in
    effect at the end, and how many files were processed.  ``progress``
    is invoked with a topic string and a position (or None to close the
    topic) as files are copied.
    """
    num = 0

    if hardlink is None:
        # hardlinks cannot cross devices, so only attempt them when
        # source and destination parent share st_dev
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # nprog reads the enclosing ``num`` at call time, offsetting
            # the child's progress position by files already handled here
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            # the recursive call may flip hardlink to False if linking
            # failed somewhere below; propagate that to later entries
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed (e.g. filesystem limitation); fall back
                # to plain copies from here on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # close out the progress topic once the whole (sub)tree is done
    progress(topic, None)

    return hardlink, num
1064 1064
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for component in path.replace('\\', '/').split('/'):
        if not component:
            continue
        for ch in component:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        stem = component.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = component[-1]
        # a trailing '.' or ' ' is rejected, except for the special
        # names '.' and '..' (the ``component not in '..'`` substring
        # test matches exactly '.', '..' and the empty string)
        if last in '. ' and component not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1115 1115
# checkosfilename validates a base-relative path for the host OS: on
# Windows that is checkwinfilename above, elsewhere the platform
# module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1120 1120
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    A symlink pointing at ``info`` is preferred because its creation is
    atomic.  When symlinks are unavailable (AttributeError) or fail for
    any reason other than the lock already existing, fall back to an
    exclusively-created regular file containing ``info``.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an already-held lock must be reported to the caller; any
        # other symlink failure falls through to the file fallback
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1133 1133
def readlock(pathname):
    """Return the info stored in the lock at pathname.

    Reads the symlink target when the lock is a symlink; when symlinks
    are unsupported (EINVAL/ENOSYS/AttributeError) reads the contents
    of the regular fallback file written by makelock.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1146 1146
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        # real files: stat by descriptor
        return os.fstat(fp.fileno())
    except AttributeError:
        # file-like objects without a descriptor: fall back to the name
        return os.stat(fp.name)
1153 1153
1154 1154 # File system features
1155 1155
1156 1156 def checkcase(path):
1157 1157 """
1158 1158 Return true if the given path is on a case-sensitive filesystem
1159 1159
1160 1160 Requires a path (like /foo/.hg) ending with a foldable final
1161 1161 directory component.
1162 1162 """
1163 1163 s1 = os.lstat(path)
1164 1164 d, b = os.path.split(path)
1165 1165 b2 = b.upper()
1166 1166 if b == b2:
1167 1167 b2 = b.lower()
1168 1168 if b == b2:
1169 1169 return True # no evidence against case sensitivity
1170 1170 p2 = os.path.join(d, b2)
1171 1171 try:
1172 1172 s2 = os.lstat(p2)
1173 1173 if s2 == s1:
1174 1174 return False
1175 1175 return True
1176 1176 except OSError:
1177 1177 return True
1178 1178
try:
    # optional google-re2 bindings; _re2 stays None until the first
    # compile attempt verifies that they actually work (see _re._checkre2)
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1184 1184
class _re(object):
    """Facade over the re module that transparently uses the optional
    re2 bindings when they are installed and the pattern/flags allow."""
    def _checkre2(self):
        # resolve the tri-state _re2 flag (None = unknown) to a boolean
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline in the pattern, not as arguments
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 does not support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton used as "util.re"
re = _re()
1229 1229
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> real on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be kept:
    # otherwise the backslash in os.sep on Windows acts as an escape
    # inside the regex character classes below instead of matching
    # itself, and backslash separators end up inside "part" groups.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through untouched
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling if the entry vanished
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1272 1272
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    linked = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(probe, linked)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(linked)
        return nlinks(linked) > 1
    except OSError:
        return False
    finally:
        # close before unlinking, then best-effort cleanup of both files
        if fd is not None:
            fd.close()
        for tmp in (probe, linked):
            try:
                os.unlink(tmp)
            except OSError:
                pass
1304 1304
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # note: falsy result may be None rather than False when there is
    # no altsep, matching the original short-circuit expression
    return os.altsep and path.endswith(os.altsep)
1308 1308
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    components = path.split(os.sep)
    return components
1316 1316
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    if getattr(osutil, 'isgui', None):
        # check if a CoreGraphics session is available
        return osutil.isgui()
    # pure build; use a safe default
    return True
1331 1331
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, basename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % basename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # nothing to copy over; the caller gets an empty temp file
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # remove the half-written temp file on any failure, including
        # KeyboardInterrupt, then let the exception propagate
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1370 1370
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # 'w' mode truncates, so copying the original contents is skipped
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        """publish the writes by renaming the temp copy over the target"""
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        """throw away all writes; the original file is left untouched"""
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        # an instance garbage-collected without close() silently discards
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1408 1408
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already present: nothing to create, no mode to apply
            return
        if err.errno == errno.ENOENT and name:
            parent = os.path.dirname(os.path.abspath(name))
            if parent != name:
                # build the missing ancestors, then retry this level
                makedirs(parent, mode, notindexed)
                makedir(name, notindexed)
            else:
                # reached the root without finding anything creatable
                raise
        else:
            raise
    if mode is not None:
        os.chmod(name, mode)
1425 1425
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create any missing ancestors first, walking up via abspath
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno != errno.EEXIST or not os.path.isdir(name):
            raise
        # someone else seems to have won a directory creation race
        return
    if mode is not None:
        os.chmod(name, mode)
1447 1447
def readfile(path):
    """Return the entire contents of path as a byte string."""
    with open(path, 'rb') as src:
        return src.read()
1451 1451
def writefile(path, text):
    """Replace the contents of path with text (binary-safe)."""
    with open(path, 'wb') as dst:
        dst.write(text)
1455 1455
def appendfile(path, text):
    """Append text to the end of path, creating it if missing."""
    with open(path, 'ab') as dst:
        dst.write(text)
1459 1459
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks."""
        def splitbig(chunks):
            # chunks larger than 1MB are cut into 256kB pieces so read()
            # never has to slice huge strings
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset into self._queue[0] of the first unconsumed byte
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left goes negative here, which terminates the loop
                left -= chunkremaining

        return ''.join(buf)
1540 1540
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # a zero-byte budget ends iteration without touching the file
        data = f.read(nbytes) if nbytes else ''
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1561 1561
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset is UTC minus local time at this instant, i.e. positive
    # values are west of UTC (the convention datestr() expects)
    utcwhen = datetime.datetime.utcfromtimestamp(timestamp)
    localwhen = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwhen - localwhen
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1574 1574
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    when, tz = date or makedate()
    if when < 0:
        when = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # expand %z (or %1%2) into a +HHMM/-HHMM suffix; positive
        # offsets are west of UTC and therefore rendered with '-'
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        tm = time.gmtime(float(when) - tz)
    except ValueError:
        # time was out of range
        tm = time.gmtime(sys.maxint)
    return time.strftime(format, tm)
1598 1598
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8631 date."""
    # delegate to datestr with a date-only format
    fmt = '%Y-%m-%d'
    return datestr(date, format=fmt)
1602 1602
def parsetimezone(tz):
    """parse a timezone string and return an offset integer"""
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        # offsets count seconds west of UTC, so '+HHMM' maps to a
        # negative number of seconds
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    # unrecognized: let the caller treat the token as part of the date
    return None
1613 1613
def strdate(string, format, defaults=None):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps a date element group ("S", "M", "HI", "d", "mb",
    "yY") to a (bias, now) pair of strings used to fill in elements
    missing from format (see parsedate for how it is built).
    """
    # the old default of a mutable [] was both a mutable-default-argument
    # antipattern and unusable: the body indexes defaults with string
    # keys, which a list cannot satisfy.  None-with-fallback keeps the
    # signature backward compatible.
    if defaults is None:
        defaults = {}
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1643 1643
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    ``formats`` defaults to defaultdateformats; ``bias`` maps date
    element groups (see strdate) to preferred default strings.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed: pass through unchanged
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in English or the current locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: a raw "unixtime offset" pair
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1722 1722
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    # earliest timestamp a partially-specified date could mean: missing
    # fields are biased to their smallest values
    def lower(date):
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    # latest timestamp a partially-specified date could mean: missing
    # fields are biased to their largest values, trying month lengths
    # 31, 30, 29 and finally 28 until one parses
    def upper(date):
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches the whole span it could refer to
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1798 1798
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', body, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # unknown or missing prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
1837 1837
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the mail domain, then any 'Real Name <' prefix, then truncate
    # at the first space and the first dot, in that order
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for stop in (' ', '.'):
        cut = user.find(stop)
        if cut >= 0:
            user = user[:cut]
    return user
1853 1853
def emailuser(user):
    """Return the user portion of an email address."""
    # everything before '@', and after a 'Real Name <' prefix if present
    user = user.partition('@')[0]
    before, sep, after = user.partition('<')
    if sep:
        user = after
    return user
1863 1863
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with no '<', find returns -1
    # and -1 + 1 == 0, i.e. the whole string; a missing '>' means the
    # slice runs to the end
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1870 1870
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegates to encoding.trim; '...' is the marker appended when the
    # text is actually cut
    return encoding.trim(text, maxlength, ellipsis='...')
1874 1874
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) rows tried
    in order: the first row whose threshold (multiplier * divisor) is
    reached formats count / divisor.  The last row's format is the
    fallback for values below every threshold.
    '''

    def render(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: use the last (smallest-unit) format
        return unittable[-1][2] % count

    return render
1885 1885
# render a byte count with the coarsest unit whose threshold is met,
# keeping up to three significant digits (100x -> no decimals,
# 10x -> one decimal, 1x -> two decimals), falling back to plain bytes
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1898 1898
def uirepr(s):
    """repr() tuned for user display: collapse the doubled backslashes
    repr() produces for Windows paths."""
    shown = repr(s)
    return shown.replace('\\\\', '\\')
1902 1902
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Factory returning a width-aware TextWrapper instance.  On first call
    # it defines the subclass and replaces itself (via the 'global'
    # rebinding below) so later calls construct instances directly.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr right before the character that would exceed
            # space_left display columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class: rebinding the module-level name makes subsequent
    # calls skip the class definition entirely
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2006 2006
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap line to at most width display columns.

    initindent prefixes the first output line, hangindent every
    subsequent one.  The byte strings are decoded with the local
    encoding so wrapping is column-aware (East Asian widths), then the
    result is re-encoded before returning.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2019 2019
def iterlines(iterator):
    """Yield every line of every chunk produced by iterator."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2024 2024
def expandpath(path):
    """Expand environment variables, then ~user constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2027 2027
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if not mainfrozen():
        return gethgcmd()
    if getattr(sys, 'frozen', None) == 'macosx_app':
        # Env variable set by py2app
        return [os.environ['EXECUTABLEPATH']]
    return [sys.executable]
2042 2042
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple; record only the pid
        # so the 'pid in terminated' membership test below can actually
        # match (a raw tuple in the set never equals the integer pid)
        terminated.add(os.wait()[0])
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)  # absent on Windows
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                # NOTE(review): condfn() is re-checked here, presumably
                # to tolerate a child that succeeded and exited between
                # the two calls -- confirm
                return -1
            time.sleep(0.1)
        return pid
    finally:
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2077 2077
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Substitute prefix-marked items from the mapping into the string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped
    in a regular expression.

    fn is an optional function applied to each replacement text just
    before substitution.

    escape_prefix optionally allows a doubled prefix to stand for a
    literal prefix character.
    """
    if fn is None:
        fn = lambda value: value
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def substitute(match):
        # strip the one-character prefix from the match to get the key
        return fn(mapping[match.group()[1:]])

    return matcher.sub(substitute, s)
2102 2102
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2119 2119
# string spellings accepted by parsebool(), mapped to their boolean value
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2123 2123
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2130 2130
# every hexadecimal digit, in both cases
_hexdig = '0123456789ABCDEFabcdef'
# map each two-digit hex escape (e.g. '7e') to its character, for _urlunquote
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2134 2134
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit hex escape; keep the literal '%'
            s += '%' + item
        except UnicodeDecodeError:
            # py2: appending a decoded byte to a unicode string failed;
            # use the code point directly instead
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2154 2154
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when quoting user/passwd components
    _safechars = "!~*'()+"
    # characters left unescaped when quoting path components
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

            if not path:
                path = None
                if self._localpath:
                    self.path = ''
                    return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url-sans-credentials, authdata-or-None) for urllib2.

        authdata is None when no user is set; otherwise it is the tuple
        expected by urllib2 password managers.
        """
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        """True if this URL cannot be joined onto a base path."""
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the local filesystem path for file:/bundle: URLs,
        or the original path string otherwise."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2441 2441
def hasscheme(path):
    """Report whether path parses as a URL with a scheme component."""
    u = url(path)
    return bool(u.scheme)
2444 2444
def hasdriveletter(path):
    """True if path begins with a Windows drive letter ('c:...')."""
    if not path:
        # preserve the falsy input itself ('' or None), as callers only
        # test truthiness
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2447 2447
def urllocalpath(path):
    """Return the local filesystem path for path interpreted as a URL."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2450 2450
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2457 2457
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2463 2463
def isatty(fp):
    """Return fp.isatty(), treating a missing isatty method as False."""
    try:
        return fp.isatty()
    except AttributeError:
        return False
2469 2469
# module-level formatter rendering a duration (in seconds) with a
# readable unit (s/ms/us/ns); precision widens as the figure shrinks
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )

# current indentation (in spaces) of nested @timed reports
_timenesting = [0]
2487 2487
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def timedwrapper(*args, **kwargs):
        indent = 2
        started = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - started
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))

    return timedwrapper
2512 2512
# suffixes understood by sizetoint(), tried in order; the bare 'b' must
# come after 'kb'/'mb'/'gb' so the longer suffixes win
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2515 2515
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        # no unit suffix: a plain number of bytes
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2534 2534
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        # source is only used as the sort key at call time
        self._hooks.append((source, hook))

    def __call__(self, *args):
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for source, hook in self._hooks]
2552 2552
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop this function's own frame plus the 'skip' innermost callers
    stack = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fname, lineno, funcname, _text in stack:
        entries.append((fileline % (fname, lineno), funcname))
    if not entries:
        return
    # pad every location to the widest one so functions line up
    width = max(len(location) for location, _func in entries)
    for location, funcname in entries:
        if line is None:
            yield (width, location, funcname)
        else:
            yield line % (width, location, funcname)
2574
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1 also drops this function's own frame from the trace
    for frameline in getstackframes(skip + 1):
        f.write(frameline)
    f.flush()
2569 2587
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # map is either a plain iterable of paths, or a dict of
        # path -> state tuples; in the latter case entries whose first
        # state byte equals 'skip' are not counted
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # this base was seen before, so its ancestors were
                # counted when it was first inserted: bump only the
                # deepest existing base and stop
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        # mirror of addpath: decrement the deepest shared base only,
        # deleting bases whose count drops to zero
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2605 2623
# prefer the C implementation of dirs when the parsers module provides it
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2608 2626
def finddirs(path):
    """Yield each ancestor directory of path, deepest first."""
    cut = path.rfind('/')
    while cut != -1:
        yield path[:cut]
        cut = path.rfind('/', 0, cut)
2614 2632
2615 2633 # compression utility
2616 2634
class nocompress(object):
    """Pass-through 'compressor' used when no compression is wanted."""
    def compress(self, x):
        # identity: hand the data back untouched
        return x
    def flush(self):
        # nothing is ever buffered, so there is nothing to emit
        return ""
2622 2640
# compression engine factories keyed by type name; None (and the legacy
# 'UN' spelling) means no compression
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2631 2649
2632 2650 def _makedecompressor(decompcls):
2633 2651 def generator(f):
2634 2652 d = decompcls()
2635 2653 for chunk in filechunkiter(f):
2636 2654 yield d.decompress(chunk)
2637 2655 def func(fh):
2638 2656 return chunkbuffer(generator(fh))
2639 2657 return func
2640 2658
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this exit function handled the exception: stop
                    # propagating it to the remaining exit functions
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure; it is re-raised once
                # every remaining exit function has had a chance to run
                # (the previous redundant duplicate assignment of
                # 'pending' was removed)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2700 2718
def _bz2():
    """Create a bz2 decompressor primed for a stream whose 'BZ' magic
    bytes have already been consumed."""
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2707 2725
# decompressor factories keyed by type name; None (and the legacy 'UN'
# spelling) means no decompression, '_truncatedBZ' handles bz2 streams
# whose leading 'BZ' magic was stripped (see _bz2 above)
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2715 2733
# convenient shortcut for interactive debugging: util.dst() is
# util.debugstacktrace()
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now