##// END OF EJS Templates
pycompat: add empty and queue to handle py3 divergence...
timeless -
r28818:6041fb8f default
parent child Browse files
Show More
@@ -0,0 +1,18 b''
1 # pycompat.py - portability shim for python 3
2 #
3 # This software may be used and distributed according to the terms of the
4 # GNU General Public License version 2 or any later version.
5
6 """Mercurial portability shim for python 3.
7
8 This contains aliases to hide python version-specific details from the core.
9 """
10
11 from __future__ import absolute_import
12
try:
    # Python 3 spelling first; fall back to the Python 2 module name.
    import queue as _queue
except ImportError:
    import Queue as _queue
empty = _queue.Empty
queue = _queue.Queue
@@ -1,2735 +1,2742 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import urllib
38 38 import zlib
39 39
40 40 from . import (
41 41 encoding,
42 42 error,
43 43 i18n,
44 44 osutil,
45 45 parsers,
46 pycompat,
46 47 )
47 48
# Re-export selected pycompat names at util module level so the rest of
# the code base can keep writing util.empty / util.queue.
empty = pycompat.empty
queue = pycompat.queue
# Select the platform implementation module once; everything below
# re-exports its entry points so callers stay platform-agnostic.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# hash constructor shortcuts
md5 = hashlib.md5
sha1 = hashlib.sha1
sha512 = hashlib.sha512
_ = i18n._

# platform-specific helpers re-exported at module level
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the osutil (C) implementation of statfiles when it exists
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
112 119
# Python compatibility

# unique sentinel: distinguishes "attribute absent" from an attribute
# whose value happens to be None/False
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
121 128
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Uses getattr() with a private sentinel default, so attributes whose
    value is None or False still count as present.
    """
    sentinel = object()
    return getattr(thing, attr, sentinel) is not sentinel
124 131
# digest name -> hash constructor
DIGESTS = {
    'md5': md5,
    'sha1': sha1,
    'sha512': sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

# sanity check: every name in the strength ranking must be implemented
for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS
135 142
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        """Create a hash object per name in `digests`; optionally seed all
        of them with the initial data `s`."""
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` into every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for digest type `key`."""
        if key not in DIGESTS:
            # fix: report the requested key; previously this formatted the
            # stale module-level loop variable 'k' into the message
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
182 189
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding the digester and keeping a
        running byte count as a side effect."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Raise Abort unless the byte count and every digest match."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
214 221
# Emulate the two-argument 'buffer' builtin where it does not exist.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] >= 3:
        def buffer(sliceable, offset=0):
            # zero-copy view from 'offset' to the end
            return memoryview(sliceable)[offset:]
    else:
        def buffer(sliceable, offset=0):
            # plain copying slice on Python 2 builds lacking the builtin
            return sliceable[offset:]
224 231
# close_fds value passed to subprocess: only close inherited fds on POSIX
closefds = os.name == 'posix'

# read size used by bufferedinputpipe._fillbuffer
_chunksize = 4096
228 235
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # `input`: the raw pipe/file object; bytes come in via os.read()
        self._input = input
        self._buffer = []   # buffered chunks, newest last
        self._eof = False   # set once os.read() returns ''
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # fill until `size` bytes are buffered (or EOF), then hand them out
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the newest chunk, -1 if none yet
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # keep the collapsed leftover as the single buffered chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
322 329
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
333 340
def popen3(cmd, env=None, newlines=False):
    """Like popen4(), but return only (stdin, stdout, stderr), discarding
    the Popen object."""
    return popen4(cmd, env, newlines)[:3]
337 344
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
346 353
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # __version__ is generated at build time; absent in a raw checkout
        return 'unknown'
    return __version__.version
354 361
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is opaque "extra" build information
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    vints.extend([None] * (3 - len(vints)))

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
407 414
# used by parsedate: strptime() patterns tried in order when parsing a
# user-supplied date string
defaultdateformats = (
    '%Y-%m-%d %H:%M:%S',
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# coarser-granularity formats additionally accepted in some contexts
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
442 449
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    # func.__code__ (available since Python 2.6) replaces the Python 2-only
    # func.func_code spelling, so this also works under Python 3.
    if func.__code__.co_argcount == 0:
        # zero-arg function: a one-slot list is enough
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
468 475
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-setting an existing key moves it
    to the end.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # move an existing key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the popped value (it was previously discarded,
        # making pop() always return None contrary to the dict contract)
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned by dict.pop
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position in the ordering
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
513 520
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # __slots__ keeps the many small cache nodes compact
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is the _notset sentinel while this node holds no entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
532 539
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        self._cache = {}

        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the cached value for ``k``, or ``default`` if absent.

        Fix: previously this returned the internal _lrucachenode wrapper
        (not the stored value) and did not refresh the entry's recency;
        it now behaves like __getitem__ with a default.
        """
        try:
            return self[k]
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
691 698
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most 21 entries, evicting the least recently *inserted* key
    once the cache grows past 20.
    '''
    cache = {}
    order = collections.deque()
    # func.__code__ (available since Python 2.6) replaces the Python 2-only
    # func.func_code spelling, so this also works under Python 3.
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                # refresh recency of a hit
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
718 725
class propertycache(object):
    """Descriptor turning a method into a lazily computed attribute.

    The first access runs the wrapped function and stores the result in
    the instance __dict__, so subsequent accesses bypass the descriptor.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
731 738
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, _err = proc.communicate(s)
    return out
738 745
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write S to a temp input file for the command to consume
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file; the command overwrites it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # NOTE(review): on OpenVMS an odd status appears to mean
            # success here — same convention as in system() below
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
772 779
# command prefix -> filter implementation, used by filter() below
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
777 784
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known prefix ('tempfile:', 'pipe:'); default to a pipe
    for name in filtertable:
        if cmd.startswith(name):
            fn = filtertable[name]
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
784 791
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
788 795
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen >= min:
            if min < max:
                # grow the threshold: at least double it, or jump to the
                # largest power of two <= the bytes we are about to emit
                min = min << 1
                nmin = 1 << log2(pendinglen)
                if nmin > min:
                    min = nmin
                if min > max:
                    min = max
            yield ''.join(pending)
            pending = []
            pendinglen = 0
    if pending:
        # flush whatever is left, even if below the threshold
        yield ''.join(pending)
819 826
# re-exported so util callers need not import the error module directly
Abort = error.Abort
821 828
def always(fn):
    """Match predicate that accepts everything."""
    return True

def never(fn):
    """Match predicate that rejects everything."""
    return False
827 834
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
849 856
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives (Windows): no relative path exists
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    # strip the common prefix, then climb out of what remains of n1
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    return os.sep.join((['..'] * len(a)) + b) or '.'
875 882
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):  # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
885 892
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# tell i18n where to find translation catalogs
i18n.setdatapath(datapath)
894 901
# cached path of the 'hg' executable; filled in lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running via the 'hg' script: use it directly
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, falling back to argv[0]
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
919 926
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stores into the module-level cache read by hgexecutable()
    global _hgexecutable
    _hgexecutable = path
924 931
def _isstdout(f):
    """True if `f` is a file-like object backed by the real stdout fd."""
    getfileno = getattr(f, 'fileno', None)
    return getfileno and getfileno() == sys.__stdout__.fileno()
928 935
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own pending output before the child writes to the
        # same stream
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # pass extra environment entries through shell-friendly conversion
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # capture combined stdout/stderr and copy it line by line
            # into the caller-supplied `out` object
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
987 994
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # A traceback of depth 1 means the TypeError came from the
            # call itself (bad argument count), not from inside func.
            tb = sys.exc_info()[2]
            if len(traceback.extract_tb(tb)) == 1:
                raise error.SignatureError
            raise
    return check
999 1006
def copyfile(src, dest, hardlink=False, copystat=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime'''
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink instead of copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
1027 1034
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible."""
    # number of files processed so far, reported through ``progress``
    num = 0

    if hardlink is None:
        # hardlinks cannot cross devices, so only attempt them when src
        # and the destination's parent live on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # shift the nested call's progress by what we already did
                if pos is not None:
                    return progress(t, pos + num)
            # the recursive call may flip hardlink to False; remember that
            # so the rest of the tree is copied instead of re-failing
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed once; fall back to plain copies from now on
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # a position of None tells the progress callback we are done
    progress(topic, None)

    return hardlink, num
1064 1071
# Path components that Windows reserves for devices, and characters that
# cannot appear in Windows filenames; consumed by checkwinfilename below.
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting both separators
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            # reserved punctuation (':*?"<>|') and control characters
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved regardless of extension (con.xml)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # a trailing '.' or ' ' is disallowed, except for the special
        # components '.' and '..' ("n not in '..'" is a substring test,
        # False only for '', '.' and '..')
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1115 1122
# Pick the platform-appropriate filename validator: the Windows rules
# above on nt, otherwise whatever the platform module provides.
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1120 1127
def makelock(info, pathname):
    '''create a lock at pathname whose content is info

    A symlink is preferred (its creation is atomic and its target is
    readable without opening a file); platforms without symlinks fall
    back to an exclusively-created regular file.
    '''
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # an existing lock is an error; any other failure means we try
        # the regular-file fallback below
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    ld = os.open(pathname, flags)
    os.write(ld, info)
    os.close(ld)
1133 1140
def readlock(pathname):
    '''return the content of a lock: the symlink target, or the file data
    for the regular-file fallback used where symlinks are unavailable'''
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: no symlink support -- in both
        # cases fall through and read the lock as a plain file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    try:
        return fp.read()
    finally:
        fp.close()
1146 1153
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no file descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fd)
1153 1160
1154 1161 # File system features
1155 1162
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    # build a case-folded sibling name to probe with
    b2 = b.upper()
    if b == b2:
        b2 = b.lower()
        if b == b2:
            return True # no evidence against case sensitivity
    p2 = os.path.join(d, b2)
    try:
        s2 = os.lstat(p2)
    except OSError:
        # folded name does not exist: filesystem distinguishes case
        return True
    # same inode for both spellings means case-insensitive
    return s2 != s1
1178 1185
try:
    # optional google-re2 bindings; _re2 stays None until _re._checkre2
    # lazily verifies that the module actually works (see issue3964)
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1184 1191
class _re(object):
    """Facade that compiles with re2 when usable, falling back to re."""

    def _checkre2(self):
        # probe re2 once and cache the verdict in the module-level _re2
        # (None: untested, True: usable, False: unavailable/broken)
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes flags inline in the pattern, not as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 does not support
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

# module-level singleton: use util.re.compile / util.re.escape
re = _re()
1229 1236
# per-directory cache of {normcased name: on-disk name} mappings
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk spelling for every entry
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned;
    # otherwise a backslash separator stays unescaped in the character
    # classes below, where it would merely escape the next character and
    # '\\' paths would never be split on Windows.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        # fall back to the caller's spelling for names not on disk
        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1272 1279
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # cannot even create the scratch file: report "unreliable"
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        # hardlink the scratch file; a correctly reporting filesystem
        # must then show a link count greater than one
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always close before unlinking, then clean up both scratch files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1304 1311
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # preserves the historical return value: None when os.altsep is unset
    return os.altsep and path.endswith(os.altsep)
1308 1315
def splitpath(path):
    '''Split path into its components on os.sep.

    os.altsep is deliberately ignored: this is just a spelled-out
    "path.split(os.sep)". Run os.path.normpath() on the input first if
    alternate separators may be present.'''
    return path.split(os.sep)
1316 1323
def gui():
    '''Are we running in a GUI?'''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        # elsewhere: Windows is assumed graphical, Unix needs an X display
        # (may return the DISPLAY string itself -- callers rely on truthiness)
        return os.name == "nt" or os.environ.get("DISPLAY")
1331 1338
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file in the same directory so a later rename over
    # ``name`` stays on one filesystem
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # the original vanished: an empty temp file is acceptable
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # copying failed for any reason: don't leave the temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1370 1377
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        # all writes go to this temporary copy; 'w' in mode means the
        # caller will truncate anyway, so the copy can start out empty
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegated methods
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # publish the new content by renaming the temp copy over the
        # original; on an already-closed file this is a no-op
        if not self._fp.closed:
            self._fp.close()
            rename(self._tempname, localpath(self.__name))

    def discard(self):
        # abandon all writes: remove the temporary copy without renaming
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1408 1415
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Unlike os.makedirs, an already existing leaf directory is not an
    error (and is left untouched, including its mode).
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            # directory already exists: nothing to do, not even chmod
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: create the chain above, then retry the leaf
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the filesystem root without success
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1425 1432
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    if os.path.isdir(name):
        # fast path: nothing to create (and no chmod on existing dirs)
        return
    # create parents first, top-down
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST and os.path.isdir(name):
            # someone else seems to have won a directory creation race
            return
        raise
    if mode is not None:
        os.chmod(name, mode)
1447 1454
def readfile(path):
    """Return the entire binary content of the file at path."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1451 1458
def writefile(path, text):
    """Replace the content of the file at path with text (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1455 1462
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1459 1466
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap chunk size: re-yield anything over 1MB in 256kB slices
            # so read() never buffers huge strings whole
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        # chunks pulled from the iterator but not yet fully returned
        self._queue = collections.deque()
        # how many bytes of self._queue[0] were consumed by earlier reads
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # iterator exhausted: return the short read
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1540 1547
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # when the remaining limit is 0, skip the read entirely
        s = nbytes and f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1561 1568
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the local utc offset is the difference between the naive-UTC and
    # naive-local renderings of the same instant
    utcdt = datetime.datetime.utcfromtimestamp(timestamp)
    localdt = datetime.datetime.fromtimestamp(timestamp)
    delta = utcdt - localdt
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1574 1581
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1 is the signed hours of the utc offset, %2 the minutes;
        # %z is shorthand for both (offset is seconds *west* of UTC,
        # hence the inverted sign)
        sign = (tz > 0) and "-" or "+"
        minutes = abs(tz) // 60
        q, r = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1598 1605
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into an ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1602 1609
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Understands "+HHMM"/"-HHMM" offsets and the names "GMT" and "UTC";
    any other string yields None. The result follows the internal
    convention of seconds *west* of UTC, so "+0100" parses to -3600.
    """
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        offset = (int(tz[1:3]) * 60 + int(tz[3:5])) * 60
        return -offset if tz[0] == "+" else offset
    if tz in ("GMT", "UTC"):
        return 0
    return None
1613 1620
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE: unixtime = localunixtime + offset
    # a trailing timezone token overrides the local offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    # (defaults maps a field group like "HI" to a (biased, today) string
    # pair; the [] default is never indexed in practice -- callers such
    # as parsedate always pass a fully populated dict)
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # usenow doubles as an index: False (0) picks the biased
            # default, True (1) picks today's value for this field
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1643 1650
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in English or the user's locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # "unixtime offset" fast path
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1722 1729
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp within the spec's precision: unknown fields
        # are rounded down (first month, first day, midnight)
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp within the spec's precision: unknown fields
        # are rounded up; month lengths are tried from 31 downwards
        # until one parses for the month in question
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        # on or before: compare against the rounded-up end of the date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        # on or after: compare against the rounded-down start of the date
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two date specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # a bare date matches anywhere within its own precision
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1798 1805
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    prefix, sep, rest = pattern.partition(':')
    if sep and prefix == 're':
        try:
            regex = remod.compile(rest)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', rest, regex.search
    if sep and prefix == 'literal':
        return 'literal', rest, rest.__eq__
    # unknown or missing prefix: exact string comparison on the whole input
    return 'literal', pattern, pattern.__eq__
1837 1844
def shortuser(user):
    """Return a short representation of a user name or email address."""
    idx = user.find('@')
    if idx >= 0:
        user = user[:idx]          # drop the mail domain
    idx = user.find('<')
    if idx >= 0:
        user = user[idx + 1:]      # drop a leading 'Real Name <' part
    idx = user.find(' ')
    if idx >= 0:
        user = user[:idx]          # keep only the first word
    idx = user.find('.')
    if idx >= 0:
        user = user[:idx]          # keep only the part before the first dot
    return user
1853 1860
def emailuser(user):
    """Return the user portion of an email address."""
    at = user.find('@')
    if at >= 0:
        # drop the domain
        user = user[:at]
    lt = user.find('<')
    if lt >= 0:
        # drop a leading 'Real Name <' part
        user = user[lt + 1:]
    return user
1863 1870
def email(author):
    '''get email of author.'''
    # take everything between '<' and '>'; with neither present this
    # degenerates to the whole string (find returns -1, -1 + 1 == 0)
    stop = author.find('>')
    if stop == -1:
        stop = None
    start = author.find('<') + 1
    return author[start:stop]
1870 1877
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim, which counts display columns (wide
    # east-asian characters) rather than bytes or code points
    return encoding.trim(text, maxlength, ellipsis='...')
1874 1881
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity

    unittable is a sequence of (multiplier, divisor, format) rows ordered
    from largest unit to smallest. The first row whose threshold
    (multiplier * divisor) does not exceed the count is used; the last
    row's format is the fallback for very small counts.
    '''

    def go(count):
        for multiplier, divisor, fmt in unittable:
            threshold = divisor * multiplier
            if count >= threshold:
                return fmt % (count / float(divisor))
        # below every threshold: render with the smallest unit
        return unittable[-1][2] % count

    return go
1885 1892
# human-readable byte counts: pick the coarsest unit that still leaves a
# value >= the row's multiplier, using fewer decimals for larger values
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1898 1905
def uirepr(s):
    """repr() of *s* with doubled backslashes collapsed.

    Avoids the double backslash that repr() produces for Windows paths.
    """
    r = repr(s)
    return r.replace('\\\\', '\\')
1902 1909
# delay import of textwrap
def MBTextWrapper(**kwargs):
    # NOTE: on first call this function replaces *itself* at module level
    # (see the 'global MBTextWrapper' at the bottom) with the class it
    # defines, so the class body is only built once.
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so that the first part fits in space_left
            # display columns (not characters); Python 2 xrange
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2006 2013
def wrap(line, width, initindent='', hangindent=''):
    # wrap a byte string to *width* display columns; decodes to unicode
    # first so column widths are computed per character (Python 2
    # str/unicode semantics), then re-encodes the result
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2019 2026
def iterlines(iterator):
    """Yield every line of every chunk produced by *iterator*."""
    for block in iterator:
        for ln in block.splitlines():
            yield ln
2024 2031
def expandpath(path):
    """Expand environment variables and '~' constructs in *path*."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
2027 2034
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen builds (py2exe/py2app) have no separate interpreter to invoke
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    return gethgcmd()
2042 2049
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # collects (pid, status) pairs reaped by os.wait()
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # condfn is re-checked after the liveness test to close the
            # window where the child succeeds just before exiting
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2077 2084
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Substitute keys of *mapping* found in *s* after *prefix*.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped
    in a regular expression.

    fn, when given, is applied to each replacement text just before it
    is inserted.

    escape_prefix allows a doubled prefix to stand for a literal prefix
    character.
    """
    if not fn:
        fn = lambda value: value
    alternatives = '|'.join(mapping.keys())
    if escape_prefix:
        # let the prefix escape itself ('%%' -> '%')
        alternatives = alternatives + '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, alternatives))

    def replace(m):
        # the match starts with the (one-character) prefix; strip it
        return fn(mapping[m.group()[1:]])

    return matcher.sub(replace, s)
2102 2109
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: resolve as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'") % port)
2119 2126
# canonical spellings accepted for config-style booleans
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2130 2137
2131 2138 _hexdig = '0123456789ABCDEFabcdef'
2132 2139 _hextochr = dict((a + b, chr(int(a + b, 16)))
2133 2140 for a in _hexdig for b in _hexdig)
2134 2141
2135 2142 def _urlunquote(s):
2136 2143 """Decode HTTP/HTML % encoding.
2137 2144
2138 2145 >>> _urlunquote('abc%20def')
2139 2146 'abc def'
2140 2147 """
2141 2148 res = s.split('%')
2142 2149 # fastpath
2143 2150 if len(res) == 1:
2144 2151 return s
2145 2152 s = res[0]
2146 2153 for item in res[1:]:
2147 2154 try:
2148 2155 s += _hextochr[item[:2]] + item[2:]
2149 2156 except KeyError:
2150 2157 s += '%' + item
2151 2158 except UnicodeDecodeError:
2152 2159 s += unichr(int(item[:2], 16)) + item[2:]
2153 2160 return s
2154 2161
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters left unescaped when quoting userinfo / path pieces
    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    # matches a leading 'scheme:' per RFC 2396 scheme characters
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                # chop off 'scheme:'; from here on this is a real URL
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        # show only the components that are actually set
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals must not be percent-quoted
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # temporarily strip the credentials so str(self) yields a
        # credential-free URI, then restore them
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        # absolute means "cannot be joined onto a base path"
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        # return a plain filesystem path for file:/bundle: URLs,
        # the original string otherwise
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2441 2448
def hasscheme(path):
    """Report whether *path* carries a URL scheme."""
    u = url(path)
    return bool(u.scheme)
2444 2451
def hasdriveletter(path):
    """True when *path* starts with a Windows drive letter ('c:...').

    A falsy *path* ('' or None) is returned unchanged, which is still
    false in a boolean context.
    """
    if not path:
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2447 2454
def urllocalpath(path):
    """Return the local filesystem path for *path* (query and fragment
    are treated as part of the path)."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2450 2457
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        parsed.passwd = '***'
    return str(parsed)
2457 2464
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2463 2470
def isatty(fp):
    """Tell whether *fp* is attached to a terminal; False when *fp*
    does not support isatty() at all."""
    try:
        result = fp.isatty()
    except AttributeError:
        result = False
    return result
2469 2476
# render a duration in seconds with a human unit (s/ms/us/ns); first
# matching threshold row decides both the unit and the precision
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2485 2492
# mutable nesting depth shared by all @timed wrappers (list so nested
# calls can mutate it in place)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        indent = 2
        begin = time.time()
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            duration = time.time() - begin
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(duration)))
    return wrapper
2512 2519
# (suffix, multiplier) pairs; longer suffixes appear before 'b' so that
# e.g. 'kb' is not matched as plain bytes
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * multiplier)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2534 2541
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # lexicographic order by source name (stable sort)
        self._hooks.sort(key=lambda pair: pair[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2552 2559
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
    length of longest filepath+line number,
    filepath+linenumber,
    function

    Not be used in production code but very convenient while developing.
    '''
    # [:-skip - 1] also drops this function's own frame
    entries = [(fileline % (fn, ln), func)
               for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # widest location string, used to align the columns
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2574 2581
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    # flush the other stream first so interleaved output stays readable
    if otherf:
        otherf.flush()
    f.write('%s at:\n' % msg)
    for frameline in getstackframes(skip + 1):
        f.write(frameline)
    f.flush()
2587 2594
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # _dirs maps each ancestor directory to its reference count
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style map: skip entries whose state equals *skip*
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        counts = self._dirs
        for ancestor in finddirs(path):
            if ancestor in counts:
                # shallower ancestors are already accounted for
                counts[ancestor] += 1
                return
            counts[ancestor] = 1

    def delpath(self, path):
        counts = self._dirs
        for ancestor in finddirs(path):
            if counts[ancestor] > 1:
                counts[ancestor] -= 1
                return
            del counts[ancestor]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2623 2630
# prefer the C implementation of dirs when the parsers extension
# provides one -- presumably much faster on large manifests (TODO
# confirm; the pure-Python class above is the fallback)
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2626 2633
def finddirs(path):
    """Yield the ancestor directories of *path*, deepest first
    ('a/b/c' -> 'a/b', then 'a')."""
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2632 2639
2633 2640 # compression utility
2634 2641
class nocompress(object):
    """Identity 'compressor': passes data through unchanged."""

    def compress(self, x):
        # no-op passthrough
        return x

    def flush(self):
        # nothing is ever buffered
        return ""
2640 2647
# map bundle compression type name -> factory of compressor objects
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2649 2656
def _makedecompressor(decompcls):
    # Build a function mapping a file handle to a chunkbuffer that
    # yields the decompressed stream, using decompcls() per stream.
    def generator(f):
        d = decompcls()
        for chunk in filechunkiter(f):
            yield d.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2658 2665
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # a truthy __exit__ suppresses the current exception
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the latest failure and keep running the
                # remaining exit functions with the new exception info
                # (the previous duplicated sys.exc_info() assignment
                # was dead code and has been removed)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            # re-raise the last exception raised by an exit function
            raise exc_val
        # per the context manager protocol: True only if we received an
        # exception and some __exit__ suppressed it
        return received and suppressed
2718 2725
def _bz2():
    # Return a bz2 decompressor primed for streams whose leading 'BZ'
    # magic was already consumed by the bundle header parser.
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2725 2732
# map bundle compression type name -> function(fh) yielding the
# decompressed stream; None means "already uncompressed"
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2733 2740
# convenient shortcut
# (short alias for debugstacktrace, handy in interactive debugging)
dst = debugstacktrace
General Comments 0
You need to be logged in to leave comments. Login now