##// END OF EJS Templates
atomictempfile: add context manager support...
Martijn Pieters -
r29394:6d96658a default
parent child Browse files
Show More
@@ -1,2844 +1,2853 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 31 import subprocess
32 32 import sys
33 33 import tempfile
34 34 import textwrap
35 35 import time
36 36 import traceback
37 37 import zlib
38 38
39 39 from . import (
40 40 encoding,
41 41 error,
42 42 i18n,
43 43 osutil,
44 44 parsers,
45 45 pycompat,
46 46 )
47 47
48 48 for attr in (
49 49 'empty',
50 50 'pickle',
51 51 'queue',
52 52 'urlerr',
53 53 # we do import urlreq, but we do it outside the loop
54 54 #'urlreq',
55 55 'stringio',
56 56 ):
57 57 globals()[attr] = getattr(pycompat, attr)
58 58
59 59 # This line is to make pyflakes happy:
60 60 urlreq = pycompat.urlreq
61 61
62 62 if os.name == 'nt':
63 63 from . import windows as platform
64 64 else:
65 65 from . import posix as platform
66 66
67 67 _ = i18n._
68 68
69 69 cachestat = platform.cachestat
70 70 checkexec = platform.checkexec
71 71 checklink = platform.checklink
72 72 copymode = platform.copymode
73 73 executablepath = platform.executablepath
74 74 expandglobs = platform.expandglobs
75 75 explainexit = platform.explainexit
76 76 findexe = platform.findexe
77 77 gethgcmd = platform.gethgcmd
78 78 getuser = platform.getuser
79 79 getpid = os.getpid
80 80 groupmembers = platform.groupmembers
81 81 groupname = platform.groupname
82 82 hidewindow = platform.hidewindow
83 83 isexec = platform.isexec
84 84 isowner = platform.isowner
85 85 localpath = platform.localpath
86 86 lookupreg = platform.lookupreg
87 87 makedir = platform.makedir
88 88 nlinks = platform.nlinks
89 89 normpath = platform.normpath
90 90 normcase = platform.normcase
91 91 normcasespec = platform.normcasespec
92 92 normcasefallback = platform.normcasefallback
93 93 openhardlinks = platform.openhardlinks
94 94 oslink = platform.oslink
95 95 parsepatchoutput = platform.parsepatchoutput
96 96 pconvert = platform.pconvert
97 97 poll = platform.poll
98 98 popen = platform.popen
99 99 posixfile = platform.posixfile
100 100 quotecommand = platform.quotecommand
101 101 readpipe = platform.readpipe
102 102 rename = platform.rename
103 103 removedirs = platform.removedirs
104 104 samedevice = platform.samedevice
105 105 samefile = platform.samefile
106 106 samestat = platform.samestat
107 107 setbinary = platform.setbinary
108 108 setflags = platform.setflags
109 109 setsignalhandler = platform.setsignalhandler
110 110 shellquote = platform.shellquote
111 111 spawndetached = platform.spawndetached
112 112 split = platform.split
113 113 sshargs = platform.sshargs
114 114 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
115 115 statisexec = platform.statisexec
116 116 statislink = platform.statislink
117 117 termwidth = platform.termwidth
118 118 testpid = platform.testpid
119 119 umask = platform.umask
120 120 unlink = platform.unlink
121 121 unlinkpath = platform.unlinkpath
122 122 username = platform.username
123 123
124 124 # Python compatibility
125 125
126 126 _notset = object()
127 127
128 128 # disable Python's problematic floating point timestamps (issue4836)
129 129 # (Python hypocritically says you shouldn't change this behavior in
130 130 # libraries, and sure enough Mercurial is not a library.)
131 131 os.stat_float_times(False)
132 132
def safehasattr(thing, attr):
    """Return True if `thing` has an attribute named `attr`.

    Uses a private sentinel as the getattr default so that attributes
    whose value is falsy still count as present.
    """
    found = getattr(thing, attr, _notset)
    return found is not _notset
135 135
# Supported digest algorithms, mapped to their hashlib constructors.
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """Feed `data` to every tracked hash object."""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        """Return the hex digest for algorithm `key`."""
        if key not in DIGESTS:
            # fix: the error message used to interpolate the module-level
            # loop variable 'k' left over from the DIGESTS_BY_STRENGTH
            # check above, not the requested 'key'
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
193 193
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        """Read from the wrapped handle, feeding all data to the digester."""
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """Abort unless all bytes read match the expected size and digests."""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for algo, expected in self._digests.items():
            actual = self._digester[algo]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (algo, expected, actual))
225 225
# Compatibility shim: keep the built-in 'buffer' where it exists (Python 2);
# otherwise provide a replacement.  On old Python 2 builds without the
# builtin this slices (copying); on Python 3 a memoryview slice is used.
try:
    buffer = buffer
except NameError:
    if sys.version_info[0] < 3:
        def buffer(sliceable, offset=0):
            # plain slice copy; 'buffer' builtin is unavailable here
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # memoryview slicing avoids copying the underlying bytes
            return memoryview(sliceable)[offset:]
235 235
236 236 closefds = os.name == 'posix'
237 237
238 238 _chunksize = 4096
239 239
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # 'input' is the underlying pipe-like object; only fileno(),
        # close() and .closed are used directly - actual reads go through
        # os.read on its descriptor in _fillbuffer
        self._input = input
        self._buffer = []   # chunks read but not yet consumed
        self._eof = False   # set once os.read returns no data
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we have 'size' bytes or hit end of stream
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the newest chunk, -1 if none yet
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all chunks so a single slice can be taken
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            # leftover bytes stay buffered as one collapsed chunk
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
333 333
def popen2(cmd, env=None, newlines=False):
    """Run `cmd` through the shell; return its (stdin, stdout) pipes.

    bufsize=-1 lets the system pick the buffer size; the subprocess
    default of 0 (unbuffered) leads to poor performance on Mac OS X
    (http://bugs.python.org/issue4194).
    """
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
344 344
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but return only the (stdin, stdout, stderr) pipes."""
    pipes = popen4(cmd, env, newlines)
    return pipes[0], pipes[1], pipes[2]
348 348
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run `cmd` through the shell; return (stdin, stdout, stderr, proc)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
357 357
def version():
    """Return version information if available, else 'unknown'."""
    try:
        from . import __version__
    except ImportError:
        return 'unknown'
    return __version__.version
365 365
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # split off everything after the first '+' as the "extra" component
    if '+' in v:
        vparts, extra = v.split('+', 1)
    else:
        vparts, extra = v, None

    vints = []
    for part in vparts.split('.'):
        try:
            vints.append(int(part))
        except ValueError:
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
418 418
419 419 # used by parsedate
420 420 defaultdateformats = (
421 421 '%Y-%m-%d %H:%M:%S',
422 422 '%Y-%m-%d %I:%M:%S%p',
423 423 '%Y-%m-%d %H:%M',
424 424 '%Y-%m-%d %I:%M%p',
425 425 '%Y-%m-%d',
426 426 '%m-%d',
427 427 '%m/%d',
428 428 '%m/%d/%y',
429 429 '%m/%d/%Y',
430 430 '%a %b %d %H:%M:%S %Y',
431 431 '%a %b %d %I:%M:%S%p %Y',
432 432 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
433 433 '%b %d %H:%M:%S %Y',
434 434 '%b %d %I:%M:%S%p %Y',
435 435 '%b %d %H:%M:%S',
436 436 '%b %d %I:%M:%S%p',
437 437 '%b %d %H:%M',
438 438 '%b %d %I:%M%p',
439 439 '%b %d %Y',
440 440 '%b %d',
441 441 '%H:%M:%S',
442 442 '%I:%M:%S%p',
443 443 '%H:%M',
444 444 '%I:%M%p',
445 445 )
446 446
447 447 extendeddateformats = defaultdateformats + (
448 448 "%Y",
449 449 "%Y-%m",
450 450 "%b",
451 451 "%b %Y",
452 452 )
453 453
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # zero-argument case: a one-element list doubles as the cache
        memo = []
        def f():
            if not memo:
                memo.append(func())
            return memo[0]
        return f
    memo = {}
    if argcount == 1:
        # single-argument case keys the dict on the argument itself,
        # avoiding the cost of packing/unpacking an args tuple
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
479 479
class sortdict(dict):
    '''a simple sorted dictionary

    Keys iterate in insertion order; re-setting an existing key moves it
    to the end of that order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: the popped value was previously computed but never
        # returned, unlike dict.pop()
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        # place key at an explicit position without disturbing other keys
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
524 524
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    __slots__ = ('next', 'prev', 'key', 'value')

    def __init__(self):
        # neighbours in the circular list; wired up by lrucachedict
        self.prev = None
        self.next = None
        # _notset marks a slot holding no live entry
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
543 543
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # backing mapping: key -> _lrucachenode
        self._cache = {}

        # start with a one-node circular list; grown lazily up to _capacity
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        """Return the value cached for ``k``, or ``default`` if absent.

        Fix: this used to return the internal _lrucachenode wrapper
        instead of the stored value, and skipped the LRU-order refresh
        that __getitem__ performs.
        """
        try:
            return self.__getitem__(k)
        except KeyError:
            return default

    def clear(self):
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
702 702
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    order = collections.deque()
    if func.__code__.co_argcount == 1:
        # single-argument variant keys directly on the argument
        def f(arg):
            if arg in results:
                order.remove(arg)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[arg] = func(arg)
            order.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                order.remove(args)
            else:
                if len(results) > 20:
                    del results[order.popleft()]
                results[args] = func(*args)
            order.append(args)
            return results[args]

    return f
729 729
class propertycache(object):
    """Descriptor that computes a value once and stores it on the instance.

    The first attribute access runs the wrapped function and writes its
    result into the instance __dict__ under the same name, so subsequent
    reads bypass this descriptor entirely.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
742 742
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    out, err = proc.communicate(s)
    return out
749 749
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file for the command to read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output temp file name; the command itself writes it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # an odd exit status denotes success on OpenVMS
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
783 783
784 784 filtertable = {
785 785 'tempfile:': tempfilter,
786 786 'pipe:': pipefilter,
787 787 }
788 788
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a known scheme prefix ('tempfile:', 'pipe:'); anything
    # else is treated as a plain pipe command
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            rest = cmd[len(name):].lstrip()
            return fn(s, rest)
    return pipefilter(s, cmd)
795 795
def binary(s):
    """return true if a string is binary data"""
    # empty or None input is never considered binary
    if not s:
        return False
    return '\0' in s
799 799
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)), with log2(0) == 0
        if not x:
            return 0
        bits = 0
        while x:
            x >>= 1
            bits += 1
        return bits - 1

    buf = []
    buffered = 0
    for chunk in source:
        buf.append(chunk)
        buffered += len(chunk)
        if buffered < min:
            continue
        if min < max:
            # grow the threshold: at least double it, or jump to the
            # largest power of two not exceeding what we just gathered,
            # capping at max
            min = min << 1
            nmin = 1 << log2(buffered)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(buf)
        buf = []
        buffered = 0
    if buf:
        # flush whatever is left, even if below the threshold
        yield ''.join(buf)
830 830
831 831 Abort = error.Abort
832 832
def always(fn):
    """Always return True; the argument is ignored."""
    return True
835 835
def never(fn):
    """Always return False; the argument is ignored."""
    return False
838 838
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only re-enable if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
860 860
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        # nothing to be relative to: return n2 from root, in local form
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path is possible, fall back
            # to an absolute path under root
            return os.path.join(root, localpath(n2))
        # n1 is absolute, so anchor n2 at root to compare like with like
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # drop the common leading components (reversed so pop() works from
    # the front of each path)
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # go up out of what remains of n1, then down into what remains of n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
886 886
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):  # new py2exe
        return True
    if safehasattr(sys, "importers"):  # old py2exe
        return True
    return imp.is_frozen("__main__")  # tools/freeze
896 896
897 897 # the location of data files matching the source code
898 898 if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
899 899 # executable version (py2exe) doesn't support __file__
900 900 datapath = os.path.dirname(sys.executable)
901 901 else:
902 902 datapath = os.path.dirname(__file__)
903 903
904 904 i18n.setdatapath(datapath)
905 905
906 906 _hgexecutable = None
907 907
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and memoized in the module-level
    _hgexecutable via _sethgexecutable().
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override through the environment
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                # the frozen executable itself is 'hg'
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running from an 'hg' script: use it directly
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: search PATH, falling back to argv[0]'s basename
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
930 930
931 931 def _sethgexecutable(path):
932 932 """set location of the 'hg' executable"""
933 933 global _hgexecutable
934 934 _hgexecutable = path
935 935
936 936 def _isstdout(f):
937 937 fileno = getattr(f, 'fileno', None)
938 938 return fileno and fileno() == sys.__stdout__.fileno()
939 939
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own pending output before the child writes to the
        # same stream
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        env = dict(os.environ)
        # environ values are coerced to shell-friendly strings
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # output goes to our own stdout: let the child inherit it
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # copy the child's stdout+stderr into 'out' line by line
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # an odd exit status denotes success on OpenVMS
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
998 998
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a one-frame traceback means the TypeError happened at the
            # call site itself, i.e. the arguments did not match
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1010 1010
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the destination's stat to detect ambiguity later
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the link itself rather than copying its target
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    # advance mtime by one second (wrapped to 31 bits) so
                    # the two stats become distinguishable
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1055 1055
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns (hardlink, num): whether hardlinking was still usable at the
    end, and the number of files processed.
    """
    num = 0

    if hardlink is None:
        # default to hardlinking only when src and dst live on the
        # same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset the child's progress by files already done here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # hardlinking failed; fall back to copying for the rest
                # of the tree as well
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1092 1092
# Path components and characters that Windows refuses in filenames
# (consumed by checkwinfilename below).
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # validate each path component separately; both separators count
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # reserved device names apply to the part before the first dot
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # NOTE: "n not in '..'" is a substring test, so the components
        # '.' and '..' (and '') are deliberately allowed through
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1143 1143
# pick the filename validator for the host OS; on Windows the rules
# above apply, elsewhere defer to the platform module
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1148 1148
def makelock(info, pathname):
    """Create a lock at pathname whose payload is info.

    A symlink carrying info as its target is preferred; when the
    platform lacks symlinks, fall back to an exclusively-created
    regular file holding info. Raises OSError (EEXIST) if the lock
    already exists.
    """
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        if why.errno == errno.EEXIST:
            raise
        # other errors: fall through to the plain-file variant
    except AttributeError: # no symlink in os
        pass

    flags = os.O_CREAT | os.O_WRONLY | os.O_EXCL
    fd = os.open(pathname, flags)
    os.write(fd, info)
    os.close(fd)
1161 1161
def readlock(pathname):
    """Return the payload of the lock at pathname.

    Reads the symlink target when the lock is a symlink, otherwise the
    content of the regular lock file.
    """
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: symlinks unsupported
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1174 1174
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # no file descriptor available; stat by name instead
        return os.stat(fp.name)
    return os.fstat(fileno())
1181 1181
1182 1182 # File system features
1183 1183
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirpath, base = os.path.split(path)
    folded = base.upper()
    if folded == base:
        folded = base.lower()
    if folded == base:
        # the name has no case to fold at all
        return True # no evidence against case sensitivity
    try:
        st2 = os.lstat(os.path.join(dirpath, folded))
    except OSError:
        # folded variant does not exist: the filesystem distinguishes
        return True
    # identical stat for both spellings means case-insensitive
    return st1 != st2
1206 1206
# Probe for the optional google-re2 binding. _re2 stays None (meaning
# "importable but unverified" -- see _re._checkre2) until first use, or
# False when the module is absent.
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1212 1212
class _re(object):
    # Facade that transparently prefers the faster re2 engine when the
    # optional re2 module is importable and functional, falling back to
    # the stdlib re module (aliased as remod) otherwise.
    def _checkre2(self):
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline in the pattern rather than as
            # a separate argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; use stdlib re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape

re = _re()
1257 1257
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry name -> on-disk entry name
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # (str.replace returns a new string; the result used to be
    # discarded, leaving '\' unescaped inside the character class)
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs pass through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1300 1300
def checknlink(testfile):
    '''check whether hardlink count reporting works properly

    Creates two scratch files next to testfile, links them, and checks
    that the link count is reported as > 1. Returns False on any
    failure (existing scratch file, unwritable directory, no hardlink
    support, broken nlink reporting).
    '''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        # always clean up the scratch files
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1332 1332
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    return os.altsep and path.endswith(os.altsep)
1336 1336
def splitpath(path):
    '''Split path on os.sep -- and only os.sep; os.altsep is deliberately
    ignored, since this is just a named form of "path.split(os.sep)".
    Run os.path.normpath() on path first if a normalized result is
    needed.'''
    return path.split(os.sep)
1344 1344
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1359 1359
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # original does not exist; the empty temp file stands in
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # never leave a stale temp file behind on failure
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1398 1398
class filestat(object):
    """help to exactly detect change of a file

    The 'stat' attribute holds the result of 'os.stat()' when the given
    path exists, and None otherwise; clients can therefore skip a
    separate preparative 'exists()' check.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            self.stat = None

    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # once ambiguity is avoided (see isambig), comparing
            # (size, ctime, mtime) is enough to exactly detect a change
            # of a file regardless of platform
            mine = (self.stat.st_size,
                    self.stat.st_ctime,
                    self.stat.st_mtime)
            theirs = (old.stat.st_size,
                      old.stat.st_ctime,
                      old.stat.st_mtime)
        except AttributeError:
            # one side has stat == None
            return False
        return mine == theirs

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        Two stats are "ambiguous" when the file changed twice or more
        within the ctime granularity (one second), so that comparing
        timestamps alone cannot distinguish the versions. Every case
        with equal ctimes is treated as ambiguous, because naturally
        advanced mtimes can collide with the manually advanced mtimes
        that callers write to disambiguate (they bump mtime by one
        second whenever isambig() reports True, guaranteeing
        S[n-1].mtime != S[n].mtime even when the size is unchanged).
        """
        try:
            return self.stat.st_ctime == old.stat.st_ctime
        except AttributeError:
            return False

    def __ne__(self, other):
        return not self == other
1464 1464
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).

    Usable as a context manager: a clean exit commits via close(),
    an exception discards the temporary file.
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: move the temporary copy over the permanent name
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one:
                    # advance mtime so timestamp comparisons still see
                    # the change
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # abort: drop the temporary copy, leave the original untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on normal exit, discard if the with-block raised
        if exctype is not None:
            self.discard()
        else:
            self.close()
1527
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # cannot recurse any further
            raise
        # create the missing ancestors first, then retry
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # swallow EEXIST to handle creation races
            if err.errno != errno.EEXIST:
                raise
            return
    if mode is not None:
        os.chmod(name, mode)
1546 1555
def readfile(path):
    """Return the entire content of the file at path, as bytes."""
    with open(path, 'rb') as f:
        return f.read()
1550 1559
def writefile(path, text):
    """Replace the content of the file at path with text (bytes)."""
    with open(path, 'wb') as f:
        f.write(text)
1554 1563
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if needed."""
    with open(path, 'ab') as f:
        f.write(text)
1558 1567
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap chunk size at 256k so read() never slices huge strings
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # offset of consumed data inside the head chunk of _queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            # BUG FIX: this used to return ''.join(self.iter) alone,
            # silently dropping chunks already buffered in self._queue
            # by an earlier bounded read() (including the partially
            # consumed head chunk). Drain the queue first.
            buf = []
            queue = self._queue
            if queue:
                buf.append(queue.popleft()[self._chunkoffset:])
                self._chunkoffset = 0
                buf.extend(queue)
                queue.clear()
            buf.append(''.join(self.iter))
            return ''.join(buf)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1639 1648
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # size == 0, or the limit is exhausted
            break
        s = f.read(nbytes)
        if not s:
            break
        if limit:
            limit -= len(s)
        yield s
1660 1669
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # local-zone offset == (UTC wall clock) - (local wall clock)
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    diff = utc - local
    return timestamp, diff.days * 86400 + diff.seconds
1673 1682
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the custom %z/%1/%2 directives into a +HHMM string
        sign = "-" if tz > 0 else "+"
        minutes = abs(tz) // 60
        hours, rem = divmod(minutes, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % rem)
    # clamp into the signed 32-bit range
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1709 1718
def shortdate(date=None):
    """turn a (timestamp, tzoff) tuple into an ISO 8601 YYYY-MM-DD date."""
    return datestr(date, '%Y-%m-%d')
1713 1722
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts '+HHMM'/'-HHMM' and the literals 'GMT'/'UTC'; anything else
    yields None. The offset follows the "west of UTC is positive"
    convention used throughout this module.
    """
    if tz[0] in "+-" and len(tz) == 5 and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz in ("GMT", "UTC"):
        return 0
    return None
1724 1733
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised."""
    # NOTE(review): 'defaults' is in practice always the part -> (biased,
    # now) mapping built by parsedate(); the [] default would fail on the
    # defaults[part] lookups below -- confirm no caller relies on it.
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        # strip the recognized timezone token from the string
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        # non-empty when any directive of this group appears in format
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1754 1763
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # resolve the symbolic dates relative to the current clock
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset"
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try every known format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1831 1840
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the (possibly partial) date can denote
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the (possibly partial) date can denote;
        # probe month lengths from 31 downward until one parses
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # explicit inclusive range
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1907 1916
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    # dispatch on the optional prefix; anything unrecognized is a literal
    if pattern.startswith('re:'):
        body = pattern[3:]
        try:
            compiled = remod.compile(body)
        except remod.error as inst:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % inst)
        return 're', body, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1946 1955
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # drop the domain part, if any
    user = user.split('@', 1)[0]
    # drop a leading "Real Name <" portion
    head, sep, tail = user.partition('<')
    if sep:
        user = tail
    # keep only the first word, and only the part before the first dot
    user = user.split(' ', 1)[0]
    user = user.split('.', 1)[0]
    return user
1962 1971
def emailuser(user):
    """Return the user portion of an email address."""
    # strip the domain, then any leading "Real Name <" part
    user = user.split('@', 1)[0]
    head, sep, tail = user.partition('<')
    return tail if sep else head
1972 1981
def email(author):
    '''get email of author.'''
    # take everything between the first '<' and the first '>'; with no
    # angle brackets the whole string is assumed to be the address
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
1979 1988
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # delegate to encoding.trim so multi-byte and wide characters are
    # counted by display width rather than byte length
    return encoding.trim(text, maxlength, ellipsis='...')
1983 1992
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # pick the first (largest) unit whose threshold the count reaches
        for threshold, divisor, fmt in unittable:
            if count >= divisor * threshold:
                return fmt % (count / float(divisor))
        # nothing matched: render with the smallest unit
        return unittable[-1][2] % count

    return go
1994 2003
# render a byte count using the largest fitting unit, keeping three
# significant digits (e.g. "1.23 GB", "12.3 MB", "123 KB")
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2007 2016
def uirepr(s):
    """Return repr(s) suitable for displaying to the user."""
    # Avoid double backslash in Windows path repr()
    return repr(s).replace('\\\\', '\\')
2011 2020
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr so that the head fits into space_left columns
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # cache the class at module level so later calls skip rebuilding it
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2115 2124
def wrap(line, width, initindent='', hangindent=''):
    """Wrap a byte string to width display columns.

    initindent prefixes the first output line, hangindent all subsequent
    ones.  The text is decoded with the local encoding, wrapped
    width-aware (see MBTextWrapper) and re-encoded on return.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2128 2137
def iterlines(iterator):
    '''yield every line of every chunk produced by iterator'''
    return (line for chunk in iterator for line in chunk.splitlines())
2133 2142
def expandpath(path):
    '''expand environment variables, then "~" constructs, in path'''
    # note: variable expansion must run first, matching the original
    # expanduser(expandvars(...)) nesting
    path = os.path.expandvars(path)
    return os.path.expanduser(path)
2136 2145
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        # NOTE(review): mainfrozen() presumably detects py2exe/py2app
        # style bundled builds -- confirm against its definition
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    return gethgcmd()
2151 2160
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and remember its (pid, status)
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # re-check condfn() after the liveness test to avoid a race
            # where the child satisfies the condition and then exits
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD handler
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2186 2195
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda s: s)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # allow the prefix itself to be escaped by doubling it
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char

    def replace(match):
        # the match starts with the (single-character) prefix; strip it
        return fn(mapping[match.group()[1:]])

    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    return matcher.sub(replace, s)
2211 2220
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    # integers and numeric strings pass straight through
    try:
        return int(port)
    except ValueError:
        # otherwise treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2228 2237
# recognized spellings for boolean values; keys must be lower-case, as
# parsebool() lower-cases its input before the lookup
_booleans = {'1': True, 'yes': True, 'true': True, 'on': True, 'always': True,
             '0': False, 'no': False, 'false': False, 'off': False,
             'never': False}
2232 2241
def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    # accept the same spellings as the module-level _booleans table
    lowered = s.lower()
    if lowered in ('1', 'yes', 'true', 'on', 'always'):
        return True
    if lowered in ('0', 'no', 'false', 'off', 'never'):
        return False
    return None
2239 2248
# table mapping every two-hex-digit string to the corresponding byte,
# used by _urlunquote to decode %XX escapes in a single dict lookup
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
2243 2252
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit escape; keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # unicode input (Python 2): decode the escape to a character
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2263 2272
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    _safechars = "!~*'()+"
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                # no scheme: the whole string is a local path
                self.path = path
                return

            if parsequery and '?' in path:
                path, self.query = path.split('?', 1)
                if not path:
                    path = None
                if not self.query:
                    self.query = None

            # // is required to specify a host/authority
            if path and path.startswith('//'):
                parts = path[2:].split('/', 1)
                if len(parts) > 1:
                    self.host, path = parts
                else:
                    self.host = parts[0]
                    path = None
                if not self.host:
                    self.host = None
                    # path of file:///d is /d
                    # path of file:///d:/ is d:/, not /d:/
                    if path and not hasdriveletter(path):
                        path = '/' + path

            if self.host and '@' in self.host:
                self.user, self.host = self.host.rsplit('@', 1)
                if ':' in self.user:
                    self.user, self.passwd = self.user.split(':', 1)
                if not self.host:
                    self.host = None

            # Don't split on colons in IPv6 addresses without ports
            if (self.host and ':' in self.host and
                not (self.host.startswith('[') and self.host.endswith(']'))):
                self._hostport = self.host
                self.host, self.port = self.host.rsplit(':', 1)
                if not self.host:
                    self.host = None

            if (self.host and self.scheme == 'file' and
                self.host not in ('localhost', '127.0.0.1', '[::1]')):
                raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # render the URL without credentials, restoring them afterwards
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2550 2559
def hasscheme(path):
    '''report whether path starts with a URL scheme (e.g. "http:")'''
    return bool(url(path).scheme)
2553 2562
def hasdriveletter(path):
    '''report whether path starts with a Windows drive letter ("c:")'''
    # preserve the original short-circuit: a falsy path is returned as is
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2556 2565
def urllocalpath(path):
    '''parse path as a URL, without splitting off query or fragment, and
    return its local path representation'''
    return url(path, parsequery=False, parsefragment=False).localpath()
2559 2568
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # keep the structure visible but mask the secret itself
        parsed.passwd = '***'
    return str(parsed)
2566 2575
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = parsed.passwd = None
    return str(parsed)
2572 2581
def isatty(fp):
    '''return fp.isatty(), or False when fp does not support it'''
    try:
        result = fp.isatty()
    except AttributeError:
        result = False
    return result
2578 2587
# render a duration (in seconds) with the largest unit keeping three
# significant digits, from seconds down to nanoseconds
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2594 2603
2595 2604 _timenesting = [0]
2596 2605
def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''
    # local import: keep the module's top-level import graph unchanged
    import functools

    # functools.wraps preserves func's __name__/__doc__ on the wrapper,
    # so stacked decorators and introspection keep working
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises, and always unwind the
            # nesting level
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2621 2630
# ordered (suffix, multiplier) pairs for sizetoint(); plain 'b' is
# deliberately last so that 'kb'/'mb'/'gb' are matched before it
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))
2624 2633
def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        # same table and ordering as the module-level _sizeunits; plain
        # 'b' must come last so 'kb'/'mb'/'gb' match first
        for suffix, unit in (('m', 2**20), ('k', 2**10), ('g', 2**30),
                             ('kb', 2**10), ('mb', 2**20), ('gb', 2**30),
                             ('b', 1)):
            if spec.endswith(suffix):
                return int(float(spec[:-len(suffix)]) * unit)
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2643 2652
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source, hookfn) pairs; kept unsorted until invoked
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # order by source name just before running
        self._hooks.sort(key=lambda entry: entry[0])
        return [hookfn(*args) for _source, hookfn in self._hooks]
2661 2670
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # drop this frame plus the 'skip' innermost caller frames
    entries = [(fileline % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # pad the location column to the widest entry
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2683 2692
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the two outputs interleave sanely
        otherf.flush()
    f.write('%s at:\n' % msg)
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2696 2705
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of direct/indirect entries below it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dict-like map (e.g. a dirstate): ignore entries whose first
            # state character equals 'skip'
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # all shallower ancestors are already counted; done
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # ancestors still referenced by other entries; done
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2732 2741
# prefer the C implementation of dirs when the parsers module has one
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2735 2744
def finddirs(path):
    '''generate every ancestor directory of path, deepest first'''
    sep = path.rfind('/')
    while sep >= 0:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2741 2750
2742 2751 # compression utility
2743 2752
class nocompress(object):
    '''identity "compressor": passes data through unchanged'''

    def compress(self, x):
        # nothing to do; return the chunk as-is
        return x

    def flush(self):
        # no buffered state, so flushing yields nothing
        return ""
2749 2758
# map of bundle compression type to a factory producing a compressor
# object; the factories delay construction until actually needed
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2758 2767
def _makedecompressor(decompcls):
    '''return a function that decompresses a file-like object chunkwise,
    using a fresh decompressor produced by the decompcls factory for
    each stream'''
    def generator(f):
        d = decompcls()
        for chunk in filechunkiter(f):
            yield d.decompress(chunk)
    def func(fh):
        return chunkbuffer(generator(fh))
    return func
2767 2776
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once. This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers. These will be invoked at __call__ time.'''
        self._pending = args
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they were
        passed to the constructor.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits. The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception; stop
                    # propagating it to the remaining managers
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure (the original code
                # redundantly called sys.exc_info() twice here) and keep
                # unwinding the remaining exit functions
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2827 2836
def _bz2():
    '''create a bz2 decompressor primed for header-stripped streams'''
    decomp = bz2.BZ2Decompressor()
    # Bzip2 streams start with "BZ", but ours was stripped of it;
    # feed the magic back so the decompressor sees a valid stream.
    decomp.decompress('BZ')
    return decomp
2834 2843
# map of bundle compression type to a function decompressing a file-like
# object; the inverse of the compressors table above
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2842 2851
# convenient shortcut for interactive debugging sessions
dst = debugstacktrace
@@ -1,101 +1,119 b''
1 1 from __future__ import absolute_import
2 2
3 3 import glob
4 4 import os
5 5 import shutil
6 6 import tempfile
7 7 import unittest
8 8
9 9 from mercurial import (
10 10 util,
11 11 )
12 12 atomictempfile = util.atomictempfile
13 13
class testatomictempfile(unittest.TestCase):
    """Exercise util.atomictempfile: writes go to a hidden temp file and
    only become visible at the target name on close()."""

    def setUp(self):
        self._testdir = tempfile.mkdtemp('atomictempfiletest')
        self._filename = os.path.join(self._testdir, 'testfilename')

    def tearDown(self):
        shutil.rmtree(self._testdir, True)

    def testsimple(self):
        file = atomictempfile(self._filename)
        self.assertFalse(os.path.isfile(self._filename))
        tempfilename = file._tempname
        self.assertTrue(tempfilename in glob.glob(
            os.path.join(self._testdir, '.testfilename-*')))

        file.write(b'argh\n')
        file.close()

        # close() renames the temp file over the target
        self.assertTrue(os.path.isfile(self._filename))
        self.assertTrue(tempfilename not in glob.glob(
            os.path.join(self._testdir, '.testfilename-*')))

    # discard() removes the temp file without making the write permanent
    def testdiscard(self):
        file = atomictempfile(self._filename)
        (dir, basename) = os.path.split(file._tempname)

        file.write(b'yo\n')
        file.discard()

        self.assertFalse(os.path.isfile(self._filename))
        # check the directory that actually holds the temp file, not the
        # current working directory
        self.assertTrue(basename not in os.listdir(dir))

    # if a programmer screws up and passes bad args to atomictempfile, they
    # get a plain ordinary TypeError, not infinite recursion
    def testoops(self):
        self.assertRaises(TypeError, atomictempfile)

    # checkambig=True avoids ambiguity of timestamp
    def testcheckambig(self):
        def atomicwrite(checkambig):
            f = atomictempfile(self._filename, checkambig=checkambig)
            f.write(b'FOO')
            f.close()

        # try some times, because reproduction of ambiguity depends on
        # "filesystem time"
        for i in xrange(5):
            atomicwrite(False)
            oldstat = os.stat(self._filename)
            if oldstat.st_ctime != oldstat.st_mtime:
                # subsequent changing never causes ambiguity
                continue

            repetition = 3

            # repeat atomic write with checkambig=True, to examine
            # whether st_mtime is advanced multiple times as expected
            for j in xrange(repetition):
                atomicwrite(True)
            newstat = os.stat(self._filename)
            if oldstat.st_ctime != newstat.st_ctime:
                # timestamp ambiguity was naturally avoided while repetition
                continue

            # st_mtime should be advanced "repetition" times, because
            # all atomicwrite() occurred at same time (in sec)
            self.assertTrue(newstat.st_mtime ==
                            ((oldstat.st_mtime + repetition) & 0x7fffffff))
            # no more examination is needed, if assumption above is true
            break
        else:
            # This platform seems too slow to examine anti-ambiguity
            # of file timestamp (or test happened to be executed at
            # bad timing). Exit silently in this case, because running
            # on other faster platforms can detect problems
            pass

    def testread(self):
        with open(self._filename, 'wb') as f:
            f.write(b'foobar\n')
        file = atomictempfile(self._filename, mode='rb')
        # assertEqual, not assertTrue: assertTrue's second argument is a
        # failure *message*, which made the original assertion vacuous
        self.assertEqual(file.read(), b'foobar\n')
        file.discard()

    def testcontextmanagersuccess(self):
        """When the context closes, the file is closed"""
        # use self._filename so tearDown cleans up; the original wrote
        # 'foo' into the current working directory and leaked it
        with atomictempfile(self._filename) as f:
            self.assertFalse(os.path.isfile(self._filename))
            f.write(b'argh\n')
        self.assertTrue(os.path.isfile(self._filename))

    def testcontextmanagerfailure(self):
        """On exception, the file is discarded"""
        try:
            with atomictempfile(self._filename) as f:
                self.assertFalse(os.path.isfile(self._filename))
                f.write(b'argh\n')
                raise ValueError
        except ValueError:
            pass
        self.assertFalse(os.path.isfile(self._filename))
if __name__ == '__main__':
    # silenttestrunner is Mercurial's project-local unittest driver
    # (presumably quiet on success — it lives outside this file)
    import silenttestrunner
    silenttestrunner.main(__name__)
General Comments 0
You need to be logged in to leave comments. Login now