##// END OF EJS Templates
util: adjust hgexecutable() to handle frozen Mercurial on OS X...
Matt Harbison -
r27765:f1fb93ee default
parent child Browse files
Show More
@@ -1,2720 +1,2724 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import urllib
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 46 )
47 47
# Pick the platform-specific implementation module; every name re-exported
# below resolves to either the windows or the posix variant.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# convenience aliases for the hash constructors used throughout this module
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# Re-export the platform layer so callers can use util.<name> without
# caring which OS they run on.
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer osutil's C implementation when it is available
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username

# Python compatibility

# unique sentinel used to tell "attribute absent" apart from None/False
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
def safehasattr(thing, attr):
    """True when 'thing' has an attribute named 'attr'.

    Relies on getattr()'s sentinel default, so only a genuinely missing
    attribute counts as absence (unlike Python 2's hasattr(), which
    swallows arbitrary exceptions).
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
123 123
# digest algorithms available to digester/digestchecker, keyed by name
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every preference entry must have a matching constructor
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed 'data' to every underlying hash object"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """return the hex digest for algorithm 'key'"""
        if key not in DIGESTS:
            # use 'key' here: the previous code referenced the stale loop
            # variable 'k' from __init__, producing a NameError or a wrong
            # algorithm name in the error message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
181 181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """read from the wrapped handle, feeding the digester as we go"""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """raise Abort when the byte count or any digest does not match"""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
213 213
# 'buffer' only exists on Python 2; provide a replacement returning a view
# (or copy, on py2) of 'sliceable' starting at 'offset'
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            return memoryview(sliceable)[offset:]

# NOTE(review): presumably disabled on Windows because close_fds cannot be
# combined with redirected std handles there — confirm
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
227 227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # 'input' is the raw file-like object wrapped by this buffer
        self._input = input
        self._buffer = []   # buffered chunks, oldest first
        self._eof = False   # True once os.read returned no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True if any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        """return up to 'size' bytes, blocking until enough data or EOF"""
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        """return one line, including the trailing newline when present"""
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first newline in the last buffered chunk
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # leftover bytes stay buffered as a single collapsed chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
321 321
def popen2(cmd, env=None, newlines=False):
    """spawn 'cmd' through the shell, returning its (stdin, stdout) pipes"""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
332 332
def popen3(cmd, env=None, newlines=False):
    """like popen4, but only expose the child's stdin/stdout/stderr pipes"""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
336 336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """spawn 'cmd' through the shell

    Returns (stdin, stdout, stderr, proc) for the child process."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
345 345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # not built/installed: the generated module does not exist
        return 'unknown'
    return __version__.version
353 353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is free-form "extra" information
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
406 406
# used by parsedate
# formats are tried in order; earlier entries are the more precise ones
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# coarser formats additionally accepted where a date range makes sense
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
441 441
def cachefunc(func):
    '''cache the result of function calls

    The wrapped function is called at most once per distinct (positional)
    argument tuple; later calls return the memoized result.'''
    # XXX doesn't handle keywords args
    # func.__code__ is the portable spelling: it exists on Python 2.6+ and
    # Python 3, whereas the old func.func_code attribute is Python 2 only.
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
467 467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end of that order.'''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # return the popped value; the previous code discarded it, which
        # broke the dict.pop() contract for callers using the result
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # the key was absent and a default was returned above
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
512 512
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # no per-instance __dict__: these nodes exist in large numbers
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.prev = None
        self.next = None
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
531 531
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # key -> _lrucachenode; the nodes themselves encode the LRU order
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1          # number of allocated nodes, not live entries
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for 'k', or 'default' when absent.

        Fixed: the previous code returned the internal _lrucachenode
        wrapper instead of the stored value (and did not refresh the
        entry's recency); delegate to __getitem__, which does both.
        """
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
690 690
def lrucachefunc(func):
    '''cache most recent results of function calls

    At most 21 distinct argument sets are remembered; the least recently
    used entry is evicted when the cache grows past that.'''
    cache = {}
    order = collections.deque()
    # func.__code__ is the portable spelling: it exists on Python 2.6+ and
    # Python 3, whereas the old func.func_code attribute is Python 2 only.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
717 717
class propertycache(object):
    """Descriptor computing a value once and caching it on the instance.

    The first attribute access runs 'func' and stores the result in the
    instance __dict__ under the same name, so subsequent lookups bypass
    the descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
730 730
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
737 737
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temporary input file for the command to consume
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # on OpenVMS a set low bit actually indicates success
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort removal of both temporary files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
774 774
# maps a command prefix to the filter strategy implementing it (see filter())
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
779 779
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix, fn in filtertable.iteritems():
        if cmd.startswith(prefix):
            # strip the strategy prefix before handing over the command
            return fn(s, cmd[len(prefix):].lstrip())
    # no explicit strategy: default to a pipe filter
    return pipefilter(s, cmd)
786 786
def binary(s):
    """return true if a string is binary data (contains a NUL byte)"""
    if not s:
        return False
    return '\0' in s
790 790
791 791 def increasingchunks(source, min=1024, max=65536):
792 792 '''return no less than min bytes per chunk while data remains,
793 793 doubling min after each chunk until it reaches max'''
794 794 def log2(x):
795 795 if not x:
796 796 return 0
797 797 i = 0
798 798 while x:
799 799 x >>= 1
800 800 i += 1
801 801 return i - 1
802 802
803 803 buf = []
804 804 blen = 0
805 805 for chunk in source:
806 806 buf.append(chunk)
807 807 blen += len(chunk)
808 808 if blen >= min:
809 809 if min < max:
810 810 min = min << 1
811 811 nmin = 1 << log2(blen)
812 812 if nmin > min:
813 813 min = nmin
814 814 if min > max:
815 815 min = max
816 816 yield ''.join(buf)
817 817 blen = 0
818 818 buf = []
819 819 if buf:
820 820 yield ''.join(buf)
821 821
# re-exported so util users don't need to import 'error' for the common case
Abort = error.Abort
823 823
def always(fn):
    """matcher predicate accepting everything; the argument is ignored"""
    return True
826 826
def never(fn):
    """matcher predicate rejecting everything; the argument is ignored"""
    return False
829 829
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
851 851
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # n1 lives on a different drive than root: no relative path exists,
        # so fall back to an absolute path for n2
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common leading components of both paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
877 877
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
887 887
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

i18n.setdatapath(datapath)

# cached path of the 'hg' executable, filled lazily by hgexecutable()
_hgexecutable = None
898 898
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # an explicit $HG override always wins
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # the script being executed is itself named 'hg'
            _sethgexecutable(mainmod.__file__)
        else:
            # fall back to searching PATH, then to however we were invoked
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
917 921
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # remembered in a module global so hgexecutable() only computes it once
    global _hgexecutable
    _hgexecutable = path
922 926
923 927 def _isstdout(f):
924 928 fileno = getattr(f, 'fileno', None)
925 929 return fileno and fileno() == sys.__stdout__.fileno()
926 930
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # keep our own pending output ahead of the child's
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # extend the inherited environment with shell-friendly strings
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # stream combined stdout/stderr into 'out' line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
        # on OpenVMS a set low bit actually indicates success
        if sys.platform == 'OpenVMS' and rc & 1:
            rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
985 989
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means the TypeError came from the
            # call itself (bad signature); deeper tracebacks originate in
            # the function body and are propagated untouched
            if len(traceback.extract_tb(sys.exc_info()[2])) == 1:
                raise error.SignatureError
            raise
    return check
997 1001
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    An existing dest is removed first. 'hardlink' is currently a no-op
    (see issue4546 note below). With copystat=True, timestamps and mode
    are both copied; otherwise only the mode is. Raises Abort when the
    underlying copy fails.'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target's contents
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't
        # needed for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1025 1029
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    num = 0

    if hardlink is None:
        # default to hardlinking when source and target live on one device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    topic = _('linking') if hardlink else _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by what we've copied so far and
                # swallow the child's final end-of-progress (None) signal
                if pos is None:
                    return
                return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed; copy instead and stop trying to link
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1062 1066
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    for part in path.replace('\\', '/').split('/'):
        if not part:
            continue
        for c in part:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the part before the first dot may not be a reserved device name
        base = part.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # note: '.' and '..' themselves are deliberately allowed (the
        # substring test 'part not in ".."' excludes them)
        last = part[-1]
        if last in '. ' and part not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % last
1113 1117
# On Windows the local-filesystem filename check is the Windows rules
# check above; elsewhere defer to the platform module's implementation.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1118 1122
def makelock(info, pathname):
    """Create a lock at pathname whose content is info.

    Prefers a symlink with info as its target; when symlinks are
    unavailable (no os.symlink, or the filesystem refuses with anything
    but EEXIST), falls back to an exclusively-created regular file.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # any other error: fall through to the plain-file variant
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1131 1135
def readlock(pathname):
    """Return the info stored in the lock at pathname (see makelock)."""
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported -- either
        # way the lock must be a plain file, handled below
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    data = fp.read()
    fp.close()
    return data
1144 1148
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # not a real file object; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
1151 1155
1152 1156 # File system features
1153 1157
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    d, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # name has no case to fold: no evidence against case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(d, folded))
    except OSError:
        # the case-folded variant doesn't exist: case-sensitive
        return True
    # same inode under both spellings means case-insensitive
    return st1 != st2
1176 1180
try:
    import re2
    # probe lazily: _re2 stays None until _re._checkre2() verifies that
    # the imported module actually works (see issue3964)
    _re2 = None
except ImportError:
    _re2 = False
1182 1186
class _re(object):
    """Facade over the ``re`` module that transparently uses the re2
    module when it is importable and functional."""

    def _checkre2(self):
        # resolve the module-level tri-state flag _re2 (None = unprobed)
        # to a definite True/False
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        # re2 is only usable when no flags beyond IGNORECASE/MULTILINE are
        # requested; those two are folded into the pattern as (?i)/(?m)
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 doesn't support; fall back
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# shared instance exposing compile()/escape
re = _re()
1227 1231
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk spelling for one directory
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes so they keep their literal meaning inside the
    # regexp character classes below. (str.replace returns a new string;
    # the previous code discarded the result, leaving '\' unescaped and
    # hence lost from the separator class on Windows.)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # runs of separators are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1270 1274
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    clone = testfile + ".hgtmp2"
    fh = None
    try:
        try:
            oslink(probe, clone)
            # nlinks() may behave differently for files on Windows shares
            # if the file is open.
            fh = posixfile(clone)
            return nlinks(clone) > 1
        except OSError:
            return False
    finally:
        if fh is not None:
            fh.close()
        for name in (probe, clone):
            try:
                os.unlink(name)
            except OSError:
                pass
1302 1306
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Always returns a bool. (Without the bool() coercion the 'or ... and'
    chain leaks None when os.altsep is unset and the path does not end
    with os.sep.)
    '''
    return path.endswith(os.sep) or bool(os.altsep and
                                         path.endswith(os.altsep))
1306 1310
def splitpath(path):
    '''Split path by os.sep.

    os.altsep is deliberately ignored: this is an explicit equivalent of
    a plain "path.split(os.sep)". Run os.path.normpath() on the path
    first if it may contain mixed or redundant separators.'''
    return path.split(os.sep)
1314 1318
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1329 1333
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, filename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original vanished: the (empty) temp file is all there is
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1368 1372
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate the file API we support straight to the real file
        for attr in ('write', 'seek', 'tell', 'fileno'):
            setattr(self, attr, getattr(self._fp, attr))

    def close(self):
        # an explicit close is the "commit": move the temp copy in place
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # drop the temporary copy without touching the permanent name
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1406 1410
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance"""
    try:
        makedir(name, notindexed)
    except OSError as inst:
        if inst.errno == errno.EEXIST:
            return
        # only a missing parent directory is recoverable
        if inst.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1423 1427
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno != errno.EEXIST or not os.path.isdir(name):
            raise
        # someone else seems to have won a directory creation race
        return
    if mode is not None:
        os.chmod(name, mode)
1445 1449
def readfile(path):
    """Return the full binary contents of path."""
    with open(path, 'rb') as fp:
        return fp.read()
1452 1456
def writefile(path, text):
    """Replace the contents of path with text (binary mode)."""
    with open(path, 'wb') as fp:
        fp.write(text)
1459 1463
def appendfile(path, text):
    """Append text to path (binary mode), creating it if necessary."""
    with open(path, 'ab') as fp:
        fp.write(text)
1466 1470
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        # split pathologically large chunks (> 1 MB) into 256 kB slices
        # so later reads never need to copy huge strings around
        def splitbig(chunks):
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of _queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # NOTE(review): this joins only the not-yet-fetched part of the
            # iterator; anything already buffered in self._queue by an
            # earlier sized read is NOT returned. Callers appear to use
            # either sized reads or a single read-all, never both --
            # verify before mixing the two.
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                # left < chunkremaining here, so left goes negative and
                # terminates the while loop on its next test
                left -= chunkremaining

        return ''.join(buf)
1547 1551
def filechunkiter(f, size=65536, limit=None):
    """Yield the data of f in chunks of at most ``size`` bytes.

    ``limit`` caps the total number of bytes read (default: read all
    data). Chunks may be shorter than ``size`` for the final chunk, or
    when f is a socket or some other type of file that sometimes reads
    less data than is requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 means the limit is exhausted; don't even issue a read
        chunk = nbytes and f.read(nbytes)
        if not chunk:
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1568 1572
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local tz offset = UTC wallclock minus local wallclock for the
    # same instant
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1581 1585
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.

    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. %1 and %2 in the format expand to
    the timezone's sign+hours and minutes; %z expands to %1%2.
    """
    when, tz = date or makedate()
    if when < 0:
        when = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # positive offsets are west of UTC, hence shown with '-'
        sign = "-" if tz > 0 else "+"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        expanded = time.gmtime(float(when) - tz)
    except ValueError:
        # time was out of range
        expanded = time.gmtime(sys.maxint)
    return time.strftime(format, expanded)
1605 1609
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into iso 8601 date."""
    return datestr(date, format='%Y-%m-%d')
1609 1613
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Returns seconds west of UTC ("+0200" -> -7200), 0 for GMT/UTC, or
    None when the string is not a recognized timezone.
    """
    if tz in ("GMT", "UTC"):
        return 0
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        # internal offsets are seconds *west* of UTC, hence the negation
        return -sign * (hours * 60 + minutes) * 60
    return None

def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # the last token was a timezone: parse the rest without it
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # fill the absent field; defaults[part] is (biased, today's)
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1650 1654
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        when, offset = map(int, date.split(' '))
    except ValueError:
        # not a bare "unixtime offset" pair: try the textual formats,
        # filling unspecified fields from per-field defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # value used to round the specific end of unknowns
            rounded = bias.get(part)
            if rounded is None:
                rounded = "00" if part[0] in "HMS" else "0"
            # value matching the generic end to today's date
            today = datestr(now, "%" + part[0])
            defaults[part] = (rounded, today)

        for fmt in formats:
            try:
                when, offset = strdate(date, fmt, defaults)
            except (ValueError, OverflowError):
                continue
            break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1729 1733
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # round unknown fields toward the start of the period
        bias = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, bias)[0]

    def upper(date):
        # round unknown fields toward the end of the period; try month
        # lengths from longest to shortest until one parses
        bias = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            bias["d"] = days
            try:
                return parsedate(date, extendeddateformats, bias)[0]
            except Abort:
                pass
        bias["d"] = "28"
        return parsedate(date, extendeddateformats, bias)[0]

    date = date.strip()
    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))

    if date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    if date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    if date[0] == "-":
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                        % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    if " to " in date:
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: start <= x <= stop
    start, stop = lower(date), upper(date)
    return lambda x: start <= x <= stop
1805 1809
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    # no (or unknown) prefix: exact string equality
    return 'literal', pattern, pattern.__eq__
1844 1848
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the domain, if any
    user = user.partition('@')[0]
    # in "Name <addr", prefer the part after '<'
    before, sep, after = user.partition('<')
    if sep:
        user = after
    # keep the first word only
    user = user.partition(' ')[0]
    # ... and only its first dot-separated component
    user = user.partition('.')[0]
    return user
1860 1864
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then anything up to and including the first '<'
    user = user.partition('@')[0]
    before, sep, after = user.partition('<')
    if sep:
        user = after
    return user
1870 1874
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; without brackets this degrades
    # to the whole string (find() returns -1, so the slice starts at 0)
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        end = None
    return author[start:end]
1877 1881
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # NOTE(review): encoding.trim is expected to measure display columns
    # (not bytes) and append '...' when trimming -- see the encoding module
    return encoding.trim(text, maxlength, ellipsis='...')
1881 1885
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) triples
    ordered from largest to smallest unit; the first triple whose
    threshold (multiplier * divisor) the count reaches is used, falling
    back to the last format applied to the raw count.
    '''

    def go(count):
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return go
1892 1896
# render a byte count with the largest fitting unit, keeping roughly
# three significant digits (the 100/10/1 thresholds pick 0/1/2 decimals)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1905 1909
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed to single ones.

    Keeps Windows paths readable in user-facing output.
    """
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
1909 1913
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # Split ucstr so that the first part fills at most space_left
            # display columns (per encoding.ucolwidth), returning the pair
            # (head, remainder).
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # Cache the class by rebinding the module-level name: the first call
    # pays for building the class, later calls just instantiate it.
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2013 2017
def wrap(line, width, initindent='', hangindent=''):
    """Word-wrap a locally-encoded byte string to the given display width.

    initindent prefixes the first output line, hangindent every following
    one. Wrapping is done on unicode so column widths are correct; the
    result is re-encoded to the local encoding.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2026 2030
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for chunk in iterator:
        lines = chunk.splitlines()
        for line in lines:
            yield line
2031 2035
def expandpath(path):
    """Expand environment variables, then '~' constructs, in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2034 2038
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # frozen build (e.g. py2exe): the running executable *is* hg
        return [sys.executable]
    return gethgcmd()
2045 2049
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() returns a (pid, status) tuple; note this reaps
        # whichever child exited, not necessarily ours.
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # NOTE(review): terminated holds (pid, status) tuples, so
            # 'pid in terminated' can never match a bare pid; exit
            # detection appears to rely on testpid() instead — confirm.
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2080 2084
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # let the prefix escape itself: map the bare prefix character to
        # itself so a doubled prefix interpolates to a single one
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))

    def replace(match):
        # strip the leading prefix character to recover the mapping key
        return fn(mapping[match.group()[1:]])

    return matcher.sub(replace, s)
2105 2109
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        pass

    try:
        return socket.getservbyname(port)
    except socket.error:
        # raise error.Abort explicitly (as the docstring promises and as
        # sizetoint does with error.ParseError) instead of relying on the
        # legacy module-level Abort alias
        raise error.Abort(_("no port number associated with service '%s'")
                          % port)
2122 2126
# Recognized spellings for true/false configuration values.
_booleans = dict([(k, True) for k in ('1', 'yes', 'true', 'on', 'always')] +
                 [(k, False) for k in ('0', 'no', 'false', 'off', 'never')])

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2133 2137
2134 2138 _hexdig = '0123456789ABCDEFabcdef'
2135 2139 _hextochr = dict((a + b, chr(int(a + b, 16)))
2136 2140 for a in _hexdig for b in _hexdig)
2137 2141
2138 2142 def _urlunquote(s):
2139 2143 """Decode HTTP/HTML % encoding.
2140 2144
2141 2145 >>> _urlunquote('abc%20def')
2142 2146 'abc def'
2143 2147 """
2144 2148 res = s.split('%')
2145 2149 # fastpath
2146 2150 if len(res) == 1:
2147 2151 return s
2148 2152 s = res[0]
2149 2153 for item in res[1:]:
2150 2154 try:
2151 2155 s += _hextochr[item[:2]] + item[2:]
2152 2156 except KeyError:
2153 2157 s += '%' + item
2154 2158 except UnicodeDecodeError:
2155 2159 s += unichr(int(item[:2], 16)) + item[2:]
2156 2160 return s
2157 2161
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                # use error.Abort explicitly instead of the legacy bare
                # Abort alias, matching the rest of this module
                raise error.Abort(_('file:// URLs can only refer to '
                                    'localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2444 2448
def hasscheme(path):
    """Report whether path carries a URL scheme (e.g. 'http://...')."""
    return bool(url(path).scheme)
2447 2451
def hasdriveletter(path):
    """True if path begins with a Windows drive letter ('c:', 'D:', ...).

    A falsy path (empty string or None) is returned unchanged.
    """
    return path and path[0:1].isalpha() and path[1:2] == ':'
2450 2454
def urllocalpath(path):
    """Return the local filesystem path for path, treating '?' and '#'
    as literal path characters rather than query/fragment separators."""
    return url(path, parsequery=False, parsefragment=False).localpath()
2453 2457
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        # mask the password but keep every other component intact
        u.passwd = '***'
    return str(u)
2460 2464
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    # dropping both user and password removes the whole 'user:pass@' part
    u.user = u.passwd = None
    return str(u)
2466 2470
def isatty(fp):
    """Return whether fp is connected to a terminal.

    Objects without a working isatty() method count as non-ttys.
    """
    try:
        result = fp.isatty()
    except AttributeError:
        return False
    return result
2472 2476
# timecount(seconds) renders a duration, scaling from seconds down to
# nanoseconds while keeping roughly three significant digits.
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2488 2492
# current indentation (in output columns) of nested @timed reports
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # bump the shared nesting level so inner @timed calls indent deeper
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises; unwind the nesting first so
            # our own line is indented at the outer level
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2515 2519
# suffix -> multiplier pairs; checked in order, so the single letters and
# the two-letter forms ('k' and 'kb') are both recognized
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                number = spec[:-len(suffix)]
                return int(float(number) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2537 2541
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # (source, hook) pairs; kept unsorted until the hooks are run
        self._hooks = []

    def add(self, source, hook):
        """Register hook under the given source name."""
        self._hooks.append((source, hook))

    def __call__(self, *args):
        """Run every registered hook with args, ordered by source name,
        and return the list of their results."""
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for source, hook in self._hooks]
2555 2559
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so interleaved output stays readable
        otherf.flush()
    f.write('%s at:\n' % msg)
    # drop our own frame plus the 'skip' innermost callers
    stack = traceback.extract_stack()[:-skip - 1]
    entries = [('%s:%s' % (frame[0], frame[1]), frame[2]) for frame in stack]
    if entries:
        # align the 'in <function>' column across all entries
        locwidth = max(len(entry[0]) for entry in entries)
        for loc, func in entries:
            f.write(' %-*s in %s\n' % (locwidth, loc, func))
    f.flush()
2572 2576
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> reference count
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping: skip files whose state equals 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # plain iterable of file names (e.g. a manifest)
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # finddirs yields ancestors deepest-first; once an ancestor is
            # already counted, all of its own ancestors are too, so bump
            # only that one and stop
            if base in dirs:
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            # mirror of addpath: decrement the deepest still-shared
            # ancestor and stop; delete entries whose count would hit zero
            if dirs[base] > 1:
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2608 2612
# prefer the parsers module's dirs implementation when it provides one
# (presumably the faster C extension); the class above is the fallback
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2611 2615
def finddirs(path):
    """Yield each ancestor directory of path, deepest first.

    'a/b/c' yields 'a/b' then 'a'; a path with no '/' yields nothing.
    """
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2617 2621
2618 2622 # compression utility
2619 2623
class nocompress(object):
    """Pass-through 'compressor' used when no compression is wanted."""

    def compress(self, x):
        # identity: hand the data back untouched
        return x

    def flush(self):
        # nothing is ever buffered, so there is nothing left to emit
        return ""
2625 2629
# compression engine factories, keyed by the two-letter stream type tag
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2634 2638
def _makedecompressor(decompcls):
    """Return a function that wraps a file-like object in a decompressing
    chunkbuffer, using decompcls() as the streaming decompressor.

    (filechunkiter and chunkbuffer are defined elsewhere in this module.)
    """
    def generator(f):
        # lazily decompress f one chunk at a time
        d = decompcls()
        for chunk in filechunkiter(f):
            yield d.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2643 2647
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def __call__(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        # entering twice is not supported; drop the factories
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # an exit handler swallowed the exception; later
                    # handlers see a clean exit
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure raised by an exit
                # function, but keep unwinding the remaining ones
                # (the original code had a redundant duplicate
                # 'pending = sys.exc_info()' assignment here)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2703 2707
def _bz2():
    """Create a bz2 decompressor primed for streams whose 'BZ' magic
    header has been stripped off."""
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2710 2714
# decompressor factories, keyed by stream type tag; each maps a file-like
# object to a readable decompressed stream
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2718 2722
# convenient shortcut for debugstacktrace, handy to type in throwaway
# debugging sessions
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now