##// END OF EJS Templates
util: adjust hgcmd() to handle frozen Mercurial on OS X...
Matt Harbison -
r27766:198f78a5 default
parent child Browse files
Show More
@@ -1,2724 +1,2728
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import urllib
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 46 )
47 47
48 48 if os.name == 'nt':
49 49 from . import windows as platform
50 50 else:
51 51 from . import posix as platform
52 52
53 53 md5 = hashlib.md5
54 54 sha1 = hashlib.sha1
55 55 sha512 = hashlib.sha512
56 56 _ = i18n._
57 57
58 58 cachestat = platform.cachestat
59 59 checkexec = platform.checkexec
60 60 checklink = platform.checklink
61 61 copymode = platform.copymode
62 62 executablepath = platform.executablepath
63 63 expandglobs = platform.expandglobs
64 64 explainexit = platform.explainexit
65 65 findexe = platform.findexe
66 66 gethgcmd = platform.gethgcmd
67 67 getuser = platform.getuser
68 68 groupmembers = platform.groupmembers
69 69 groupname = platform.groupname
70 70 hidewindow = platform.hidewindow
71 71 isexec = platform.isexec
72 72 isowner = platform.isowner
73 73 localpath = platform.localpath
74 74 lookupreg = platform.lookupreg
75 75 makedir = platform.makedir
76 76 nlinks = platform.nlinks
77 77 normpath = platform.normpath
78 78 normcase = platform.normcase
79 79 normcasespec = platform.normcasespec
80 80 normcasefallback = platform.normcasefallback
81 81 openhardlinks = platform.openhardlinks
82 82 oslink = platform.oslink
83 83 parsepatchoutput = platform.parsepatchoutput
84 84 pconvert = platform.pconvert
85 85 poll = platform.poll
86 86 popen = platform.popen
87 87 posixfile = platform.posixfile
88 88 quotecommand = platform.quotecommand
89 89 readpipe = platform.readpipe
90 90 rename = platform.rename
91 91 removedirs = platform.removedirs
92 92 samedevice = platform.samedevice
93 93 samefile = platform.samefile
94 94 samestat = platform.samestat
95 95 setbinary = platform.setbinary
96 96 setflags = platform.setflags
97 97 setsignalhandler = platform.setsignalhandler
98 98 shellquote = platform.shellquote
99 99 spawndetached = platform.spawndetached
100 100 split = platform.split
101 101 sshargs = platform.sshargs
102 102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
103 103 statisexec = platform.statisexec
104 104 statislink = platform.statislink
105 105 termwidth = platform.termwidth
106 106 testpid = platform.testpid
107 107 umask = platform.umask
108 108 unlink = platform.unlink
109 109 unlinkpath = platform.unlinkpath
110 110 username = platform.username
111 111
# Python compatibility

# Unique sentinel used to detect "absent" attributes/values; safer than
# None because None can be a legitimate stored value.
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
120 120
def safehasattr(thing, attr):
    """Return True only if `thing` truly has attribute `attr`.

    Unlike the hasattr() builtin, this probes via getattr() with a
    sentinel default, so it never confuses a stored None (or, on
    Python 2, a raising property) with a missing attribute.
    """
    sentinel = _notset
    return getattr(thing, attr, sentinel) is not sentinel
123 123
# Supported digest algorithms, keyed by user-visible name.
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every name in the strength ordering must be implemented
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
134 134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked digest."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for `key`; raise Abort for unknown types."""
        if key not in DIGESTS:
            # Fix: this previously interpolated the stale loop variable 'k'
            # left over from __init__, raising NameError (or showing the
            # wrong name) instead of reporting the offending key.
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181 181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, tracking byte count and digests."""
        data = self._fh.read(length)
        self._got += len(data)
        self._digester.update(data)
        return data

    def validate(self):
        """Raise Abort unless the observed size and all digests match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
213 213
# Compatibility shim: make a zero-copy-ish `buffer` callable available on
# interpreters where the builtin is absent (it was removed in Python 3).
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# close inherited file descriptors in children only where that is safe/cheap
closefds = os.name == 'posix'

# read granularity for bufferedinputpipe, in bytes
_chunksize = 4096
227 227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        self._input = input    # underlying file-like object (must have fileno)
        self._buffer = []      # list of buffered string chunks, oldest first
        self._eof = False      # True once os.read returned an empty string
        self._lenbuf = 0       # total number of buffered bytes across chunks

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        # expose the underlying descriptor so this object can be select()ed
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """Read up to `size` bytes (less only at end of file)."""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """Read one line (up to and including '\\n', or the rest at EOF)."""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        lfi = -1                       # index of '\n' in the newest chunk
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            # keep pulling chunks until a newline shows up or EOF
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks into one string before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the unconsumed remainder as a single chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
321 321
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through a shell; return the child's (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332 332
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but only return the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
336 336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through a shell with all three std streams piped.

    Returns (stdin, stdout, stderr, popen-object).
    """
    proc = subprocess.Popen(cmd,
                            shell=True,
                            bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345 345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (eg running from a source checkout)
        return 'unknown'
    return __version__.version
353 353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is extra build metadata
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return tuple(vints[:2])
    if n == 3:
        return tuple(vints[:3])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406 406
# used by parsedate
defaultdateformats = (
    # full date and time, 24- and 12-hour variants
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    # month/day shorthands
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    # textual month formats
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    # time-only formats
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
    )

# additional, more ambiguous formats accepted where context allows
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
    )
441 441
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ works on Python 2.6+ and Python 3, unlike the legacy
    # func_code attribute which Python 3 removed.
    if func.__code__.co_argcount == 0:
        # nullary function: a one-slot list is enough
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467 467
class sortdict(dict):
    '''a simple sorted dictionary

    Keys keep insertion order: iteration, keys() and items() follow the
    order in which keys were (last) inserted.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-inserting an existing key moves it to the end
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # Fix: propagate dict.pop()'s return value (previously discarded),
        # so sortdict.pop() honors the standard mapping contract.
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent; dict.pop already returned the default
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place ``key`` at ``index`` in the ordering (no dedup on purpose,
        # matching the historical behavior)
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512 512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # links are wired up by the owning lrucachedict
        self.prev = None
        self.next = None
        # _notset marks a slot that holds no cache entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531 531
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        # circular list with a single (empty) node to start with
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        # an access refreshes the entry
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value for ``k``, or ``default`` if not cached.

        Fix: this previously returned the internal ``_lrucachenode``
        object instead of the stored value, inconsistent with
        ``__getitem__``. A hit now also refreshes the entry, like any
        other access.
        """
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
690 690
def lrucachefunc(func):
    '''cache most recent results of function calls

    At most ~21 distinct argument sets are retained; the least recently
    used entry is evicted when the limit is exceeded.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ works on Python 2.6+ and Python 3, unlike the legacy
    # func_code attribute which Python 3 removed.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # refresh recency on a hit
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
717 717
class propertycache(object):
    """Non-data descriptor caching its getter's result on the instance.

    The first attribute access runs the wrapped function and stores the
    result in the instance __dict__ under the same name; subsequent
    lookups then bypass this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
730 730
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
737 737
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to consume
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file; the command will overwrite it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS, odd status values indicate success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
774 774
# dispatch table mapping a command prefix to its filter implementation
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
    }
779 779
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known prefix ('tempfile:' or 'pipe:'); anything
    # unprefixed defaults to a plain pipe filter
    for prefix, fn in filtertable.iteritems():
        if not cmd.startswith(prefix):
            continue
        return fn(s, cmd[len(prefix):].lstrip())
    return pipefilter(s, cmd)
786 786
def binary(s):
    """return true if a string is binary data"""
    # empty/None input is never considered binary
    if not s:
        return False
    return '\0' in s
790 790
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); defined as 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    buffered = 0
    for chunk in source:
        pending.append(chunk)
        buffered += len(chunk)
        if buffered < min:
            continue
        if min < max:
            # double the threshold, but jump straight to the size we
            # actually observed when it is even larger, capped at max
            min = min << 1
            nmin = 1 << log2(buffered)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        buffered = 0
    if pending:
        yield ''.join(pending)
821 821
# re-export so util users can raise/catch Abort without importing error
Abort = error.Abort

def always(fn):
    """Predicate that accepts anything (its argument is ignored)."""
    return True

def never(fn):
    """Predicate that rejects anything (its argument is ignored)."""
    return False
829 829
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # remember the current collector state so we can restore it
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if wasenabled:
                gc.enable()
    return wrapper
851 851
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # nothing to be relative to: the destination itself is the answer
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path is possible,
            # so return the absolute destination path
            return os.path.join(root, localpath(n2))
        # anchor n2 at root so both paths are absolute
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # pop the common leading components off both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into b
    return os.sep.join((['..'] * len(a)) + b) or '.'
877 877
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
887 887
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    # NOTE(review): the 'macosx_app' (py2app) case falls through here,
    # presumably because __file__ remains usable there — confirm
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)
896 896
# cached location of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via the HG environment variable
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # py2exe and friends: the interpreter *is* the hg binary
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running directly from the 'hg' script
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, falling back to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
921 921
def _sethgexecutable(path):
    """set location of the 'hg' executable

    path - value to cache for subsequent hgexecutable() calls
    """
    global _hgexecutable
    _hgexecutable = path
926 926
927 927 def _isstdout(f):
928 928 fileno = getattr(f, 'fileno', None)
929 929 return fileno and fileno() == sys.__stdout__.fileno()
930 930
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own pending output before the child writes anything
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # extend (a copy of) our environment with the caller's variables,
        # shell-stringified, plus the path to ourselves as $HG
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # child can write straight to our stdout
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # relay the child's combined stdout/stderr to `out` line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS, odd status values indicate success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
989 989
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a TypeError whose traceback is exactly one frame deep was
            # raised by the call itself (bad argument list), not by code
            # inside the function body
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) != 1:
                raise
            raise error.SignatureError

    return check
1001 1001
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target;
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
        os.symlink(os.readlink(src), dest)
        return
    try:
        shutil.copyfile(src, dest)
        if copystat:
            # copystat also copies mode
            shutil.copystat(src, dest)
        else:
            shutil.copymode(src, dest)
    except shutil.Error as inst:
        raise Abort(str(inst))
1029 1029
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, count) tuple: whether hardlinking was still in
    effect when the copy finished, and how many files were processed.
    progress is called as progress(topic, position) as files are
    handled, and with position=None when done.
    """
    num = 0

    # default: attempt hardlinks only when src and dst's parent are on
    # the same device
    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the child's progress by files already finished
                if pos is not None:
                    return progress(t, pos + num)
            # the child may flip hardlink to False; that decision
            # propagates to subsequent entries
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; copy this file and all remaining ones
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1066 1066
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for ch in part:
            if ch in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % ch
            if ord(ch) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % ch
        # reserved device names apply to the stem before the first dot
        stem = part.split('.')[0]
        if stem and stem.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % stem
        last = part[-1]
        if last in '. ' and part not in ('.', '..'):
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1117 1117
# select the filename validity check for the current platform
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1122 1122
def makelock(info, pathname):
    """Create a lock at pathname whose payload is the string info.

    A symlink with info as its target is preferred; when symlinks are
    unavailable (or fail for reasons other than EEXIST), an
    exclusively-created regular file holding info is used instead.
    An existing lock raises OSError(EEXIST).
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # any other symlink failure: fall through to the file scheme
    except AttributeError: # no symlink in os
        pass

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1135 1135
def readlock(pathname):
    """Return the payload of the lock at pathname.

    Reads the symlink target when the lock is a symlink; otherwise
    (EINVAL/ENOSYS, or no os.readlink at all) reads the lock as a
    plain file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
        # not a symlink: read it as a regular file below
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1148 1148
def fstat(fp):
    """Stat a file object, falling back to stat'ing fp.name when the
    object has no fileno() method."""
    try:
        fd = fp.fileno()
    except AttributeError:
        return os.stat(fp.name)
    return os.fstat(fd)
1155 1155
1156 1156 # File system features
1157 1157
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if folded == b:
        folded = b.lower()
        if folded == b:
            return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(d, folded))
    except OSError:
        # the case-folded sibling doesn't exist: case-sensitive
        return True
    # identical stat => the folded name is the same file
    return st2 != st1
1180 1180
# Probe for the optional re2 module.  _re2 is a tri-state flag:
# None = re2 imported but not yet verified to work, False = missing
# (or, later, found broken); _re._checkre2 resolves None to a bool.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1186 1186
class _re(object):
    """Facade over regexp compilation that prefers the re2 engine when
    it is installed and usable (see the module-level _re2 flag)."""

    def _checkre2(self):
        # resolve the tri-state _re2 flag into a definite True/False
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        # only use re2 when no flags beyond IGNORECASE/MULTILINE are
        # requested; those two are folded into the pattern text as
        # (?i)/(?m) before handing it to re2
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern rejected by re2: fall back to stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton; callers use util.re.compile() etc.
re = _re()
1231 1231
# maps normalized directory path -> {normcased name: on-disk name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace() returns a new string, so the result must be kept:
    # previously the return value was discarded, leaving '\' unescaped
    # and '[\/]' matching only '/' inside the character classes below.
    seps = seps.replace('\\', '\\\\')
    # alternates: runs of non-separator chars (path components) vs.
    # runs of separator chars (preserved verbatim in the result)
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller-supplied spelling if still unknown
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1274 1274
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        # stale probe file from an earlier run: don't clobber it
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # can't even create a file next to testfile
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        # hardlinking (or the follow-up open/stat) failed
        return False
    finally:
        # best-effort cleanup of both probe files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1306 1306
def endswithsep(path):
    """Check whether path ends with os.sep or os.altsep (truthy result
    means yes; falsy may be False or None)."""
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1310 1310
def splitpath(path):
    """Split path on os.sep -- and os.sep only.

    os.altsep is deliberately ignored; this is just a readable spelling
    of "path.split(os.sep)".  Run os.path.normpath() first if
    alternative separators may be present.
    """
    return path.split(os.sep)
1318 1318
def gui():
    """Are we running in a GUI?

    On OS X this means a CoreGraphics session is available (and we are
    not coming in over SSH); elsewhere it means Windows, or X11 with
    DISPLAY set.
    """
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1333 1333
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file alongside the original, so a later rename()
    # stays within the same directory/filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        # caller promised to overwrite the contents; skip the copy
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy from; the empty temp file will do
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1372 1372
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        # the temp copy is created next to name (see mktempcopy), so
        # the final rename stays on the same filesystem
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: move the temp copy over the permanent name
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abort: drop the temp copy and everything written to it
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1410 1410
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    If mode is not None it is chmod()ed onto every directory this call
    creates.  notindexed is forwarded to makedir() (see ensuredirs for
    its meaning).  An already-existing directory is not an error.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # already there; nothing to do
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it first, then retry this directory
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root; can't recurse further
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1427 1427
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    # create parents first; recursion bottoms out at an existing
    # directory or at the root, where dirname(name) == name
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1449 1449
def readfile(path):
    """Return the entire contents of path as a binary string."""
    with open(path, 'rb') as fp:
        return fp.read()
1456 1456
def writefile(path, text):
    """Write text to path, replacing any existing content (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1463 1463
def appendfile(path, text):
    """Append text to path, creating the file if needed (binary mode)."""
    with open(path, 'ab') as fp:
        fp.write(text)
1470 1470
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-yield any chunk bigger than 1MB as 256KB pieces, so
            # read() never has to slice huge strings
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # how much of the chunk at the head of _queue was already
        # consumed by previous bounded read() calls
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # NOTE(review): this only drains the underlying iterator;
            # data already buffered in self._queue by an earlier
            # bounded read() is not returned -- confirm callers never
            # mix bounded and unbounded reads on the same buffer
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left < chunkremaining here, so this goes negative and
                # terminates the loop
                left -= chunkremaining

        return ''.join(buf)
1551 1551
def filechunkiter(f, size=65536, limit=None):
    """Yield successive chunks read from file object f.

    Each chunk is at most size bytes (default 65536); when limit is
    given, at most limit bytes are produced in total.  Chunks may be
    shorter than size at end of file or when f is a socket or other
    short-reading file object.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 (limit exhausted) short-circuits without reading
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1572 1572
def makedate(timestamp=None):
    """Return (unixtime, offset) for timestamp, defaulting to now.

    offset is the local timezone's distance from UTC in seconds
    (positive west of UTC), derived by comparing the UTC and local
    renderings of the same instant.
    """
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    localtime = datetime.datetime.fromtimestamp(timestamp)
    delta = utc - localtime
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1585 1585
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """Render a (unixtime, offset) tuple as a localized time string.

    unixtime is seconds since the epoch; offset is the timezone's
    distance from UTC in seconds.  "%1"/"%2" (or "%z") in format expand
    to the signed hour/minute timezone suffix; the rest is handed to
    time.strftime().
    """
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # positive tz means west of UTC, rendered with a '-' sign
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        hours, rem = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % rem)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1609 1609
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO-8601 style
    YYYY-MM-DD date string."""
    return datestr(date, format='%Y-%m-%d')
1613 1613
def parsetimezone(tz):
    """Parse a timezone string into a seconds-west-of-UTC offset.

    Accepts "+HHMM"/"-HHMM", "GMT" and "UTC"; anything else (including
    names like "EST") yields None.
    """
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # east of UTC is a negative internal offset
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    return None
1624 1624
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): defaults is indexed like a dict keyed by the part
    # groups below (see parsedate); the [] default is never usable --
    # consider defaults=None with an explicit check
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # last token was an explicit timezone; strip it before strptime
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # is any directive of this group (e.g. %H or %I) in the format?
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # element absent: append a default value and a matching
            # directive, using '@' as an unambiguous joiner
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1654 1654
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in English or the current locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # raw "unixtime offset" form
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1733 1733
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec can mean: unspecified month/day
        # default to 1
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec can mean: unspecified fields default
        # to end-of-period; try month lengths longest-first until the
        # date parses (e.g. "Feb 31" is rejected, "Feb 29"/"28" tried)
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # single date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1809 1809
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    prefix, sep, rest = pattern.partition(':')
    if sep and prefix == 're':
        try:
            regex = remod.compile(rest)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', rest, regex.search
    if sep and prefix == 'literal':
        return 'literal', rest, rest.__eq__
    # no prefix, or an unknown one: match the whole string literally
    return 'literal', pattern, pattern.__eq__
1848 1848
def shortuser(user):
    """Return a short representation of a user name or email address.

    Strips from the first '@' on, anything up to a '<', and then
    anything after the first space or '.' in what remains.
    """
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        user = user[lt + 1:]
    for stopchar in (' ', '.'):
        idx = user.find(stopchar)
        if idx >= 0:
            user = user[:idx]
    return user
1864 1864
def emailuser(user):
    """Return the user (local) portion of an email address string."""
    at = user.find('@')
    if at >= 0:
        user = user[:at]
    bracket = user.find('<')
    if bracket >= 0:
        user = user[bracket + 1:]
    return user
1874 1874
def email(author):
    """Extract the address from an "Author Name <addr>" string.

    Without angle brackets the whole string is returned unchanged.
    """
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1881 1881
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display.

    An '...' marker is appended when trimming occurs; the width
    measurement is delegated to encoding.trim.
    """
    return encoding.trim(text, maxlength, ellipsis='...')
1885 1885
def unitcountfn(*unittable):
    """Build a formatter that renders a quantity with readable units.

    unittable rows are (multiplier, divisor, format), ordered from the
    largest unit down.  The returned function uses the first row whose
    threshold (divisor * multiplier) the count reaches, and otherwise
    falls back to the last row's format applied to the raw count.
    """
    def go(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                # float() keeps the division from truncating on Python 2
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return go
1896 1896
# Human-readable byte count formatter built on unitcountfn: scales
# through KB/MB/GB (powers of 1024) keeping about three significant
# digits, falling back to a raw byte count.
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1909 1909
def uirepr(s):
    """repr(s) with doubled backslashes collapsed, so Windows paths in
    UI messages stay readable."""
    r = repr(s)
    # repr() escapes each backslash; undo that for display purposes
    return r.replace('\\\\', '\\')
1913 1913
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # Lazy, self-replacing factory: the first call defines the class,
    # rebinds the module-level name MBTextWrapper to it (see the
    # `global` statement at the bottom), and returns an instance.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the last character whose display columns
            # still fit into space_left
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2017 2017
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to width columns with the given indents.

    Decodes to unicode first so wrapping respects display column widths,
    then re-encodes the result in the local encoding.
    """
    widest = max(len(initindent), len(hangindent))
    if width <= widest:
        # adjust for weird terminal size
        width = max(78, widest + 1)
    decoded = [s.decode(encoding.encoding, encoding.encodingmode)
               for s in (line, initindent, hangindent)]
    line, initindent, hangindent = decoded
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2030 2030
def iterlines(iterator):
    """Re-chunk an iterable of text blobs into individual lines."""
    for blob in iterator:
        for line in blob.splitlines():
            yield line
2035 2035
def expandpath(path):
    """Expand environment variables and '~' constructs in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2038 2038
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # The rendered diff left the old unconditional
        # `return [sys.executable]` in place above the new branch, which
        # would make the py2app case unreachable; only the conditional
        # form below is intended.
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    return gethgcmd()
2049 2053
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child; os.wait() returns a (pid, status) tuple
        terminated.add(os.wait())
    prevhandler = None
    # SIGCHLD does not exist on Windows, hence the getattr dance
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        # NOTE(review): terminated holds (pid, status) tuples, so
        # `pid in terminated` never matches a bare pid; liveness is
        # effectively detected by testpid() alone -- confirm intent.
        while not condfn():
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2084 2088
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # work on a copy so the caller's mapping is not mutated by the
        # doubled-prefix escape entry
        mapping = mapping.copy()
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
2109 2113
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a number; try resolving it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2126 2130
# recognized spellings of true/false configuration values
_booleans = dict.fromkeys(['1', 'yes', 'true', 'on', 'always'], True)
_booleans.update(dict.fromkeys(['0', 'no', 'false', 'off', 'never'], False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower(), None)
2137 2141
2138 2142 _hexdig = '0123456789ABCDEFabcdef'
2139 2143 _hextochr = dict((a + b, chr(int(a + b, 16)))
2140 2144 for a in _hexdig for b in _hexdig)
2141 2145
2142 2146 def _urlunquote(s):
2143 2147 """Decode HTTP/HTML % encoding.
2144 2148
2145 2149 >>> _urlunquote('abc%20def')
2146 2150 'abc def'
2147 2151 """
2148 2152 res = s.split('%')
2149 2153 # fastpath
2150 2154 if len(res) == 1:
2151 2155 return s
2152 2156 s = res[0]
2153 2157 for item in res[1:]:
2154 2158 try:
2155 2159 s += _hextochr[item[:2]] + item[2:]
2156 2160 except KeyError:
2157 2161 s += '%' + item
2158 2162 except UnicodeDecodeError:
2159 2163 s += unichr(int(item[:2], 16)) + item[2:]
2160 2164 return s
2161 2165
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when re-quoting each component in __str__
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        # keep the untouched input for localpath() fallback
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # only show components that were actually parsed
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # render the URL without credentials for use as an auth realm key
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2448 2452
def hasscheme(path):
    """Report whether path parses with a URL scheme component."""
    parsed = url(path)
    return bool(parsed.scheme)
2451 2455
def hasdriveletter(path):
    """Report whether path begins with a Windows drive letter ('c:...')."""
    # a falsy path ('' or None) is returned as-is, mirroring `and` chains
    return path and path[0:1].isalpha() and path[1:2] == ':'
2454 2458
def urllocalpath(path):
    """Return the local filesystem path a URL-ish path refers to."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2457 2461
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so it is obvious a password was present
        parsed.passwd = '***'
    return str(parsed)
2464 2468
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2470 2474
def isatty(fp):
    """Return whether fp is connected to a tty; False for objects
    that do not implement isatty() at all."""
    try:
        return fp.isatty()
    except AttributeError:
        # duck-typed "file" objects may lack isatty entirely
        return False
2476 2480
# render a duration given in seconds with a readable time unit
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2492 2496
# current indentation level for nested @timed reports
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        _timenesting[0] += indent
        start = time.time()
        try:
            return func(*args, **kwargs)
        finally:
            elapsed = time.time() - start
            _timenesting[0] -= indent
            # indent by nesting depth so nested timed calls read as a tree
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2519 2523
# suffix -> multiplier; order matters: longer suffixes like 'kb' are
# reached only after single-letter ones fail to match
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2541 2545
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # order by source name before each invocation
        self._hooks.sort(key=lambda entry: entry[0])
        return [fn(*args) for _source, fn in self._hooks]
2559 2563
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush stdout first so the trace is not interleaved with output
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop this frame plus the 'skip' innermost caller frames
    entries = [('%s:%s' % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # align the "in <func>" column across all frames
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            f.write(' %-*s in %s\n' % (fnmax, fnln, func))
    f.flush()
2576 2580
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        self._dirs = {}
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: honor the skip state
            for fname, entry in map.iteritems():
                if entry[0] != skip:
                    self.addpath(fname)
        else:
            for fname in map:
                self.addpath(fname)

    def addpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if base in counts:
                # once a directory is known, all its ancestors are too
                counts[base] += 1
                return
            counts[base] = 1

    def delpath(self, path):
        counts = self._dirs
        for base in finddirs(path):
            if counts[base] > 1:
                # still referenced; ancestors keep their counts as well
                counts[base] -= 1
                return
            del counts[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2612 2616
# prefer the C implementation of dirs when the parsers module provides one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2615 2619
def finddirs(path):
    """Yield every ancestor directory of path, deepest first."""
    sep = path.rfind('/')
    while sep >= 0:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2621 2625
2622 2626 # compression utility
2623 2627
class nocompress(object):
    """Identity 'compressor': hands data through untouched."""
    def compress(self, x):
        return x

    def flush(self):
        # stateless, so there is never anything buffered to drain
        return ""
2629 2633
# map bundle compression type -> compressor factory
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2638 2642
def _makedecompressor(decompcls):
    # Return a function wrapping a file-like object with streaming
    # decompression, using a fresh decompcls instance per stream.
    def generator(f):
        d = decompcls()
        for chunk in filechunkiter(f):
            yield d.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2647 2651
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def __call__(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent exit failure; re-raised below
                # (was: a redundant `pending = sys.exc_info()` immediately
                # overwritten by this combined assignment)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2707 2711
def _bz2():
    # decompressor for '_truncatedBZ' streams whose magic was removed
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2714 2718
# map bundle compression type -> function wrapping a file-like object
# with streaming decompression
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2722 2726
# convenient shortcut (short enough to type at a debugger prompt)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now