##// END OF EJS Templates
util: use absolute_import
Gregory Szorc -
r27358:ac839ee4 default
parent child Browse files
Show More
@@ -1,2484 +1,2504
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 import i18n
17 _ = i18n._
18 import error, osutil, encoding, parsers
19 import errno, shutil, sys, tempfile, traceback
16 from __future__ import absolute_import
17
18 import bz2
19 import calendar
20 import collections
21 import datetime
22 import errno
23 import gc
24 import hashlib
25 import imp
26 import os
20 27 import re as remod
21 import os, time, datetime, calendar, textwrap, signal, collections
22 import imp, socket, urllib
23 import gc
24 import bz2
28 import shutil
29 import signal
30 import socket
31 import subprocess
32 import sys
33 import tempfile
34 import textwrap
35 import time
36 import traceback
37 import urllib
25 38 import zlib
26 import hashlib
39
40 from . import (
41 encoding,
42 error,
43 i18n,
44 osutil,
45 parsers,
46 )
27 47
28 48 if os.name == 'nt':
29 import windows as platform
49 from . import windows as platform
30 50 else:
31 import posix as platform
51 from . import posix as platform
32 52
33 53 md5 = hashlib.md5
34 54 sha1 = hashlib.sha1
35 55 sha512 = hashlib.sha512
56 _ = i18n._
36 57
37 58 cachestat = platform.cachestat
38 59 checkexec = platform.checkexec
39 60 checklink = platform.checklink
40 61 copymode = platform.copymode
41 62 executablepath = platform.executablepath
42 63 expandglobs = platform.expandglobs
43 64 explainexit = platform.explainexit
44 65 findexe = platform.findexe
45 66 gethgcmd = platform.gethgcmd
46 67 getuser = platform.getuser
47 68 groupmembers = platform.groupmembers
48 69 groupname = platform.groupname
49 70 hidewindow = platform.hidewindow
50 71 isexec = platform.isexec
51 72 isowner = platform.isowner
52 73 localpath = platform.localpath
53 74 lookupreg = platform.lookupreg
54 75 makedir = platform.makedir
55 76 nlinks = platform.nlinks
56 77 normpath = platform.normpath
57 78 normcase = platform.normcase
58 79 normcasespec = platform.normcasespec
59 80 normcasefallback = platform.normcasefallback
60 81 openhardlinks = platform.openhardlinks
61 82 oslink = platform.oslink
62 83 parsepatchoutput = platform.parsepatchoutput
63 84 pconvert = platform.pconvert
64 85 poll = platform.poll
65 86 popen = platform.popen
66 87 posixfile = platform.posixfile
67 88 quotecommand = platform.quotecommand
68 89 readpipe = platform.readpipe
69 90 rename = platform.rename
70 91 removedirs = platform.removedirs
71 92 samedevice = platform.samedevice
72 93 samefile = platform.samefile
73 94 samestat = platform.samestat
74 95 setbinary = platform.setbinary
75 96 setflags = platform.setflags
76 97 setsignalhandler = platform.setsignalhandler
77 98 shellquote = platform.shellquote
78 99 spawndetached = platform.spawndetached
79 100 split = platform.split
80 101 sshargs = platform.sshargs
81 102 statfiles = getattr(osutil, 'statfiles', platform.statfiles)
82 103 statisexec = platform.statisexec
83 104 statislink = platform.statislink
84 105 termwidth = platform.termwidth
85 106 testpid = platform.testpid
86 107 umask = platform.umask
87 108 unlink = platform.unlink
88 109 unlinkpath = platform.unlinkpath
89 110 username = platform.username
90 111
91 112 # Python compatibility
92 113
93 114 _notset = object()
94 115
95 116 # disable Python's problematic floating point timestamps (issue4836)
96 117 # (Python hypocritically says you shouldn't change this behavior in
97 118 # libraries, and sure enough Mercurial is not a library.)
98 119 os.stat_float_times(False)
99 120
def safehasattr(thing, attr):
    """Report whether 'thing' has attribute 'attr'.

    Uses the module-level unique sentinel so that a legitimate attribute
    value of any kind (including None) is never mistaken for "missing".
    """
    sentinel = _notset
    return getattr(thing, attr, sentinel) is not sentinel
102 123
103 124 DIGESTS = {
104 125 'md5': md5,
105 126 'sha1': sha1,
106 127 'sha512': sha512,
107 128 }
108 129 # List of digest types from strongest to weakest
109 130 DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']
110 131
111 132 for k in DIGESTS_BY_STRENGTH:
112 133 assert k in DIGESTS
113 134
class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        # feed the data to every configured hash object
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # bugfix: the message previously interpolated 'k', which is not
            # defined in this method and only resolved to the stale loop
            # variable leaked at module level, so the wrong digest name was
            # reported; 'key' is the name actually being looked up
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
160 181
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # every byte read is fed to the digester and counted toward the
        # expected total, so validate() can run after the stream is consumed
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        # check the total size first, then each expected digest in turn
        if self._size != self._got:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for k, v in self._digests.items():
            if v != self._digester[k]:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (k, v, self._digester[k]))
192 213
193 214 try:
194 215 buffer = buffer
195 216 except NameError:
196 217 if sys.version_info[0] < 3:
197 218 def buffer(sliceable, offset=0):
198 219 return sliceable[offset:]
199 220 else:
200 221 def buffer(sliceable, offset=0):
201 222 return memoryview(sliceable)[offset:]
202 223
203 import subprocess
204 224 closefds = os.name == 'posix'
205 225
206 226 _chunksize = 4096
207 227
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # input: the underlying pipe/file-like object. Buffered data is kept
        # in self._buffer as a list of chunks; self._lenbuf caches the total
        # number of buffered bytes so read() can avoid re-summing.
        self._input = input
        self._buffer = []
        self._eof = False
        self._lenbuf = 0

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        # delegate to the wrapped pipe
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we have 'size' bytes buffered or hit EOF, then
        # hand back at most 'size' bytes
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the first '\n' within the most recent chunk (only the
        # last chunk needs scanning; earlier chunks were already searched)
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all pending chunks into one before slicing
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # os.read on the raw fd reads at most one chunk, so bytes beyond the
        # buffer remain visible to select/poll on the fd
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
301 321
def popen2(cmd, env=None, newlines=False):
    """Run cmd through the shell and return its (stdin, stdout) pipes."""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
312 332
def popen3(cmd, env=None, newlines=False):
    """Like popen4, but without exposing the process object."""
    # drop the trailing Popen handle from popen4's 4-tuple
    return popen4(cmd, env, newlines)[:3]
316 336
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """Run cmd through the shell; return (stdin, stdout, stderr, process)."""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
325 345
def version():
    """Return version information if available."""
    try:
        from . import __version__
    except ImportError:
        # no generated version module (e.g. running from a source checkout)
        return 'unknown'
    return __version__.version
333 353
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)
    """
    if not v:
        v = version()
    # everything after the first '+' is opaque "extra" information
    vparts, sep, extra = v.partition('+')
    if not sep:
        extra = None

    vints = []
    for piece in vparts.split('.'):
        try:
            vints.append(int(piece))
        except ValueError:
            # stop at the first non-numeric component
            break
    # pad to at least three components: (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
386 406
387 407 # used by parsedate
388 408 defaultdateformats = (
389 409 '%Y-%m-%d %H:%M:%S',
390 410 '%Y-%m-%d %I:%M:%S%p',
391 411 '%Y-%m-%d %H:%M',
392 412 '%Y-%m-%d %I:%M%p',
393 413 '%Y-%m-%d',
394 414 '%m-%d',
395 415 '%m/%d',
396 416 '%m/%d/%y',
397 417 '%m/%d/%Y',
398 418 '%a %b %d %H:%M:%S %Y',
399 419 '%a %b %d %I:%M:%S%p %Y',
400 420 '%a, %d %b %Y %H:%M:%S', # GNU coreutils "/bin/date --rfc-2822"
401 421 '%b %d %H:%M:%S %Y',
402 422 '%b %d %I:%M:%S%p %Y',
403 423 '%b %d %H:%M:%S',
404 424 '%b %d %I:%M:%S%p',
405 425 '%b %d %H:%M',
406 426 '%b %d %I:%M%p',
407 427 '%b %d %Y',
408 428 '%b %d',
409 429 '%H:%M:%S',
410 430 '%I:%M:%S%p',
411 431 '%H:%M',
412 432 '%I:%M%p',
413 433 )
414 434
415 435 extendeddateformats = defaultdateformats + (
416 436 "%Y",
417 437 "%Y-%m",
418 438 "%b",
419 439 "%b %Y",
420 440 )
421 441
def cachefunc(func):
    '''cache the result of function calls

    Results are memoized forever, keyed on the positional arguments.
    '''
    # XXX doesn't handle keywords args
    # use __code__ (an alias of func_code since Python 2.6, and the only
    # spelling on Python 3) so this keeps working as the codebase migrates
    if func.__code__.co_argcount == 0:
        cache = []
        def f():
            if len(cache) == 0:
                cache.append(func())
            return cache[0]
        return f
    cache = {}
    if func.__code__.co_argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in cache:
                cache[arg] = func(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                cache[args] = func(*args)
            return cache[args]

    return f
447 467
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end of the order.
    '''
    def __init__(self, data=None):
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        if key in self:
            # re-insertion moves the key to the end of the order
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # bugfix: return the popped value; it was previously discarded so
        # pop() always returned None, breaking the dict.pop contract
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was returned by dict.pop
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
492 512
class lrucachedict(object):
    '''cache most recent gets from or sets to this dictionary'''

    def __init__(self, maxsize):
        self._cache = {}
        self._maxsize = maxsize
        # keys ordered least- to most-recently used
        self._order = collections.deque()

    def __getitem__(self, key):
        # a hit counts as a use: move the key to the MRU end
        val = self._cache[key]
        self._order.remove(key)
        self._order.append(key)
        return val

    def __setitem__(self, key, value):
        if key in self._cache:
            self._order.remove(key)
        elif len(self._cache) >= self._maxsize:
            # evict the least recently used entry to make room
            del self._cache[self._order.popleft()]
        self._cache[key] = value
        self._order.append(key)

    def __contains__(self, key):
        return key in self._cache

    def clear(self):
        self._cache.clear()
        self._order = collections.deque()
521 541
def lrucachefunc(func):
    '''cache most recent results of function calls

    Keeps at most ~20 entries, evicting the least recently used one.
    '''
    cache = {}
    order = collections.deque()
    # use __code__ (alias of func_code since Python 2.6, the only spelling
    # on Python 3) so this keeps working as the codebase migrates
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[arg] = func(arg)
            else:
                order.remove(arg)
            order.append(arg)
            return cache[arg]
    else:
        def f(*args):
            if args not in cache:
                if len(cache) > 20:
                    del cache[order.popleft()]
                cache[args] = func(*args)
            else:
                order.remove(args)
            order.append(args)
            return cache[args]

    return f
548 568
class propertycache(object):
    """Descriptor that caches the wrapped function's result on the instance.

    The first attribute access computes the value and stores it in the
    instance __dict__ under the same name, so subsequent lookups never
    reach the descriptor again.
    """
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
561 581
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    # the command runs through the shell; stderr is not captured, so error
    # output goes wherever the parent's stderr points
    p = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                         stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    # perr is always None here since stderr is not piped
    pout, perr = p.communicate(s)
    return pout
568 588
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # pre-create the output file; the command will overwrite it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        # substitute the real temp file names into the command template
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        if sys.platform == 'OpenVMS' and code & 1:
            # on OpenVMS an odd status value means success
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        fp = open(outname, 'rb')
        r = fp.read()
        fp.close()
        return r
    finally:
        # best-effort cleanup: unlink both temp files, ignoring errors so
        # the result (or the original exception) reaches the caller
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
605 625
606 626 filtertable = {
607 627 'tempfile:': tempfilter,
608 628 'pipe:': pipefilter,
609 629 }
610 630
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    # dispatch on a command prefix from filtertable ('tempfile:', 'pipe:');
    # commands without an explicit scheme default to a pipe
    for name, fn in filtertable.iteritems():
        if cmd.startswith(name):
            # strip the scheme prefix and surrounding whitespace
            return fn(s, cmd[len(name):].lstrip())
    return pipefilter(s, cmd)
617 637
def binary(s):
    """return true if a string is binary data"""
    # empty (or None) input is never considered binary; otherwise the
    # presence of a NUL byte is the heuristic
    if not s:
        return False
    return '\0' in s
621 641
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def log2(x):
        # floor(log2(x)); 0 for x == 0
        if not x:
            return 0
        i = 0
        while x:
            x >>= 1
            i += 1
        return i - 1

    pending = []
    pendinglen = 0
    for chunk in source:
        pending.append(chunk)
        pendinglen += len(chunk)
        if pendinglen < min:
            # not enough buffered yet; keep accumulating
            continue
        if min < max:
            # grow the threshold: double it, or jump straight to the
            # largest power of two not exceeding what we just buffered,
            # whichever is bigger -- capped at max
            min = min << 1
            nmin = 1 << log2(pendinglen)
            if nmin > min:
                min = nmin
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        pendinglen = 0
    if pending:
        yield ''.join(pending)
652 672
653 673 Abort = error.Abort
654 674
def always(fn):
    """matcher predicate that accepts every input"""
    return True
657 677
def never(fn):
    """matcher predicate that rejects every input"""
    return False
660 680
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    def wrapper(*args, **kwargs):
        # remember whether GC was on so nesting stays well-behaved
        restore = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            if restore:
                gc.enable()
    return wrapper
682 702
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            # different drives: no relative path exists, fall back to
            # an absolute destination path
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    comps1 = splitpath(n1)
    comps2 = n2.split('/')
    # count the shared leading components of both paths
    common = 0
    for c1, c2 in zip(comps1, comps2):
        if c1 != c2:
            break
        common += 1
    # go up out of what remains of n1, then down into the rest of n2
    up = ['..'] * (len(comps1) - common)
    return os.sep.join(up + comps2[common:]) or '.'
708 728
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen("__main__")    # tools/freeze
718 738
719 739 # the location of data files matching the source code
720 740 if mainfrozen():
721 741 # executable version (py2exe) doesn't support __file__
722 742 datapath = os.path.dirname(sys.executable)
723 743 else:
724 744 datapath = os.path.dirname(__file__)
725 745
726 746 i18n.setdatapath(datapath)
727 747
728 748 _hgexecutable = None
729 749
def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.

    The result is computed once and memoized in the module-level
    _hgexecutable; later calls return the cached value.
    """
    if _hgexecutable is None:
        # resolution order: $HG override, frozen executable, the running
        # script if it is literally named 'hg', then a PATH search
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            _sethgexecutable(hg)
        elif mainfrozen():
            _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            _sethgexecutable(mainmod.__file__)
        else:
            # last resort: whatever name we were invoked under
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable
748 768
def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    # stored in a module global so hgexecutable() can memoize it
    global _hgexecutable
    _hgexecutable = path
753 773
754 774 def _isstdout(f):
755 775 fileno = getattr(f, 'fileno', None)
756 776 return fileno and fileno() == sys.__stdout__.fileno()
757 777
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    try:
        # flush our own buffered output before the child writes to the
        # same descriptor, so output does not interleave out of order
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # child environment: our environment plus the caller's overrides
        # (shell-stringified) plus the path to ourselves in $HG
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            # no redirection requested: let the child share our stdout
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # copy the child's combined stdout/stderr into 'out' as it runs
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                out.write(line)
            proc.wait()
            rc = proc.returncode
    if sys.platform == 'OpenVMS' and rc & 1:
        # on OpenVMS an odd status value means success
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
816 836
def checksignature(func):
    '''wrap a function with code to check for calling errors'''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a one-frame traceback means the call itself failed to bind
            # its arguments; deeper tracebacks are TypeErrors raised from
            # inside the function body and must propagate untouched
            tb = traceback.extract_tb(sys.exc_info()[2])
            if len(tb) != 1:
                raise
            raise error.SignatureError

    return check
828 848
def copyfile(src, dest, hardlink=False):
    "copy a file, preserving mode and atime/mtime"
    if os.path.lexists(dest):
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    # NOTE: 'if False and ...' deliberately disables the hardlink path;
    # the parameter is kept for interface compatibility
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        # recreate the symlink rather than copying its target
        os.symlink(os.readlink(src), dest)
    else:
        try:
            shutil.copyfile(src, dest)
            shutil.copymode(src, dest)
        except shutil.Error as inst:
            raise Abort(str(inst))
849 869
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns (hardlink, num): whether hardlinking was still in effect at
    the end, and how many files were processed.
    """
    num = 0

    if hardlink is None:
        # default to hardlinking only when src and dst are on the same device
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            def nprog(t, pos):
                # offset child progress by the files already processed here
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # linking failed: fall back to copying for this file and
                # all subsequent ones (hardlink stays False from here on)
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    # a None position signals completion to the progress callback
    progress(topic, None)

    return hardlink, num
886 906
# device names reserved by Windows regardless of extension
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may not appear anywhere in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # validate each path component independently
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                # control characters are invalid in Windows filenames
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # device names are reserved whatever the extension ('con.xml' too)
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        # a trailing dot or space is silently dropped by Windows;
        # '.' and '..' remain acceptable
        t = n[-1]
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
937 957
938 958 if os.name == 'nt':
939 959 checkosfilename = checkwinfilename
940 960 else:
941 961 checkosfilename = platform.checkosfilename
942 962
def makelock(info, pathname):
    # preferred form: a symlink whose target encodes the lock holder --
    # symlink creation is atomic and readable without opening the file
    try:
        return os.symlink(info, pathname)
    except OSError as why:
        # EEXIST means someone else holds the lock: propagate; other
        # OSErrors fall through to the regular-file fallback below
        if why.errno == errno.EEXIST:
            raise
    except AttributeError: # no symlink in os
        pass

    # fallback: exclusively create a regular file containing the info
    ld = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(ld, info)
    os.close(ld)
955 975
def readlock(pathname):
    # symlink-style lock first (see makelock)
    try:
        return os.readlink(pathname)
    except OSError as why:
        # EINVAL: not a symlink; ENOSYS: readlink unsupported --
        # fall through to reading a regular lock file
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    except AttributeError: # no symlink in os
        pass
    fp = posixfile(pathname)
    r = fp.read()
    fp.close()
    return r
968 988
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    try:
        fd = fp.fileno()
    except AttributeError:
        # no fileno(): fall back to a stat by name
        return os.stat(fp.name)
    return os.fstat(fd)
975 995
976 996 # File system features
977 997
def checkcase(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    st1 = os.lstat(path)
    dirname, basename = os.path.split(path)
    folded = basename.upper()
    if folded == basename:
        folded = basename.lower()
    if folded == basename:
        # the name does not case-fold at all; no evidence against
        # case sensitivity
        return True
    try:
        st2 = os.lstat(os.path.join(dirname, folded))
    except OSError:
        # folded name does not exist: the FS distinguishes case
        return True
    # same inode under both spellings means case-insensitive
    return st1 != st2
1000 1020
1001 1021 try:
1002 1022 import re2
1003 1023 _re2 = None
1004 1024 except ImportError:
1005 1025 _re2 = False
1006 1026
class _re(object):
    """re-like facade (compile/escape) that prefers re2 when usable."""

    def _checkre2(self):
        # Probe re2 once and cache the verdict in the module-level
        # _re2 flag (None = undecided, False = unusable, True = usable).
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        unsupported = flags & ~(remod.IGNORECASE | remod.MULTILINE)
        if _re2 and not unsupported:
            # re2 takes flags inline rather than as an argument
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                # pattern uses a feature re2 lacks; fall through to re
                pass
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        return re2.escape if _re2 else remod.escape
1049 1069
# singleton instance; callers use util.re.compile / util.re.escape
re = _re()

# cache for fspath() below: directory path -> {normcased name: real name}
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased name -> actual on-disk spelling
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string; the result must be assigned,
    # otherwise the backslash separator on Windows is left unescaped
    # inside the regex character classes below.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1094 1114
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    probe = testfile + ".hgtmp1"
    if os.path.lexists(probe):
        return False
    try:
        posixfile(probe, 'w').close()
    except IOError:
        return False

    link = testfile + ".hgtmp2"
    fh = None
    try:
        oslink(probe, link)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fh = posixfile(link)
        return nlinks(link) > 1
    except OSError:
        return False
    finally:
        if fh is not None:
            fh.close()
        for tmp in (probe, link):
            try:
                os.unlink(tmp)
            except OSError:
                pass
1126 1146
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.'''
    if path.endswith(os.sep):
        return True
    # may return None/'' when os.altsep is unset, matching historical
    # truthiness-based callers
    return os.altsep and path.endswith(os.altsep)
1130 1150
def splitpath(path):
    """Return the components of path, split on os.sep.

    os.altsep is deliberately ignored: this is just a named spelling of
    path.split(os.sep).  Run os.path.normpath() on the input first if
    separators may be mixed or duplicated.
    """
    return path.split(os.sep)
1138 1158
def gui():
    '''Are we running in a GUI?'''
    if sys.platform != 'darwin':
        # Windows is always graphical; X11 needs $DISPLAY
        return os.name == "nt" or os.environ.get("DISPLAY")
    if 'SSH_CONNECTION' in os.environ:
        # handle SSH access to a box where the user is logged in
        return False
    isgui = getattr(osutil, 'isgui', None)
    if isgui:
        # check if a CoreGraphics session is available
        return isgui()
    # pure build; use a safe default
    return True
1153 1173
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    dirname, filename = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % filename, dir=dirname)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            if inst.errno == errno.ENOENT:
                # no original to copy; the empty temp file is the copy
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try:
            os.unlink(temp)
        except OSError:
            pass
        raise
    return temp
1192 1212
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes go to a temporary copy of the target file.  Calling
    close() renames that copy over the original name, publishing the
    new contents in one step.  If the object is destroyed without
    being closed, everything written is discarded.
    '''
    def __init__(self, name, mode='w+b', createmode=None):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name,
                                    emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)

        # delegate file methods straight to the underlying file object
        for meth in ('write', 'seek', 'tell', 'fileno'):
            setattr(self, meth, getattr(self._fp, meth))

    def close(self):
        if self._fp.closed:
            return
        self._fp.close()
        rename(self._tempname, localpath(self.__name))

    def discard(self):
        if self._fp.closed:
            return
        try:
            os.unlink(self._tempname)
        except OSError:
            pass
        self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()
1230 1250
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Missing ancestors are created first; an already-existing directory
    is accepted silently.  When mode is given it is chmod()ed onto name
    afterwards.
    """
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if exc.errno == errno.EEXIST:
            return
        if exc.errno != errno.ENOENT or not name:
            raise
        # a parent is missing: build it, then retry name itself
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            # reached the top without success; nothing more to create
            raise
        makedirs(parent, mode, notindexed)
        makedir(name, notindexed)
    if mode is not None:
        os.chmod(name, mode)
1247 1267
def ensuredirs(name, mode=None, notindexed=False):
    """race-safe recursive directory creation

    Unlike makedirs(), losing a creation race to another process is not
    an error here.  Newly created directories are marked as "not to be
    indexed by the content indexing service", if ``notindexed`` is
    specified for "write" mode access.
    """
    if os.path.isdir(name):
        return
    parent = os.path.dirname(os.path.abspath(name))
    if parent != name:
        # create ancestors first; recursion bottoms out at the root
        ensuredirs(parent, mode, notindexed)
    try:
        makedir(name, notindexed)
    except OSError as exc:
        if not (exc.errno == errno.EEXIST and os.path.isdir(name)):
            raise
        # someone else seems to have won a directory creation race
        return
    if mode is not None:
        os.chmod(name, mode)
1269 1289
def readfile(path):
    """Return the entire contents of the file at path, as bytes.

    The context manager guarantees the file is closed even if read()
    raises, replacing the older try/finally form.
    """
    with open(path, 'rb') as fp:
        return fp.read()
1276 1296
def writefile(path, text):
    """Write text (bytes) to the file at path, replacing any contents.

    The context manager guarantees the file is closed even if write()
    raises, replacing the older try/finally form.
    """
    with open(path, 'wb') as fp:
        fp.write(text)
1283 1303
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if missing.

    The context manager guarantees the file is closed even if write()
    raises, replacing the older try/finally form.
    """
    with open(path, 'ab') as fp:
        fp.write(text)
1290 1310
class chunkbuffer(object):
    """Present an iterator of string chunks as a readable stream,
    regardless of how the input was originally chunked."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # re-slice chunks bigger than 1MB into 256kB pieces so no
            # single queue element is huge
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        yield chunk[pos:pos + 2**18]
                        pos += 2**18
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        pending = self._queue
        pieces = []
        remaining = l
        while remaining > 0:
            if not pending:
                # pull roughly 256kB ahead from the source iterator
                budget = 2**18
                for chunk in self.iter:
                    pending.append(chunk)
                    budget -= len(chunk)
                    if budget <= 0:
                        break
                if not pending:
                    break # source exhausted: short read

            # Peek at the head chunk rather than popleft/appendleft
            # cycling: partial reads only advance self._chunkoffset,
            # avoiding extra deque mutations and string copies.
            head = pending[0]
            headlen = len(head)
            consumed = self._chunkoffset

            if consumed == 0 and remaining >= headlen:
                # the whole chunk fits; hand it over without slicing
                pending.popleft()
                pieces.append(head)
                remaining -= headlen
                # self._chunkoffset stays 0
                continue

            avail = headlen - consumed
            if remaining >= avail:
                # finish off a partially-consumed head chunk
                # (consumed > 0 here, so the slice is a real copy)
                pending.popleft()
                pieces.append(head[consumed:])
                self._chunkoffset = 0
                remaining -= avail
            else:
                # take only part of the head chunk and remember where
                # we stopped
                pieces.append(head[consumed:consumed + remaining])
                self._chunkoffset += remaining
                remaining = 0

        return ''.join(pieces)
1371 1391
def filechunkiter(f, size=65536, limit=None):
    """Yield successive reads from file object f.

    Each chunk is at most ``size`` bytes (default 65536), stopping after
    ``limit`` total bytes when given (default: read to EOF).  Chunks may
    be shorter than size at end of file, or for sockets and other file
    types that can return short reads.
    """
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        # nbytes == 0 short-circuits without calling read()
        data = nbytes and f.read(nbytes)
        if not data:
            break
        if limit:
            limit -= len(data)
        yield data
1392 1412
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # offset is UTC wall time minus local wall time, in seconds
    utcwall = datetime.datetime.utcfromtimestamp(timestamp)
    localwall = datetime.datetime.fromtimestamp(timestamp)
    delta = utcwall - localwall
    tz = delta.days * 86400 + delta.seconds
    return timestamp, tz
1405 1425
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC. if timezone is false, do not
    append time zone to string."""
    t, tz = date or makedate()
    if t < 0:
        t = 0 # time.gmtime(lt) fails on Windows for lt < -43200
        tz = 0
    if "%1" in format or "%2" in format or "%z" in format:
        # %1/%2 (and their %z shorthand) expand to the +HHMM/-HHMM
        # timezone suffix; offsets are stored west-positive, hence the
        # inverted sign
        sign = "+" if tz <= 0 else "-"
        hours, mins = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, hours))
        format = format.replace("%2", "%02d" % mins)
    try:
        t = time.gmtime(float(t) - tz)
    except ValueError:
        # time was out of range
        t = time.gmtime(sys.maxint)
    return time.strftime(format, t)
1429 1449
def shortdate(date=None):
    """Render a (timestamp, tzoff) tuple as an ISO-8601 style date only."""
    isofmt = '%Y-%m-%d'
    return datestr(date, format=isofmt)
1433 1453
def parsetimezone(tz):
    """parse a timezone string and return an offset integer

    Accepts "+HHMM"/"-HHMM" and the literal names "GMT"/"UTC"; any other
    input (including an empty string) yields None.  The returned offset
    is in seconds, west-positive, so "+0500" parses to -18000.

    Fix over the previous version: the length check now happens before
    tz[0] is read, so an empty string returns None instead of raising
    IndexError.
    """
    if len(tz) == 5 and tz[0] in "+-" and tz[1:].isdigit():
        sign = 1 if tz[0] == "+" else -1
        hours = int(tz[1:3])
        minutes = int(tz[3:5])
        return -sign * (hours * 60 + minutes) * 60
    if tz == "GMT" or tz == "UTC":
        return 0
    return None
1444 1464
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    defaults maps strptime part groups ("S", "M", "HI", "d", "mb", "yY")
    to a (biased, today) string pair used to fill elements missing from
    format.  NOTE(review): the [] default would fail on defaults[part]
    below; it is only safe because every caller passes a dict -- confirm.
    """
    # NOTE: unixtime = localunixtime + offset
    # if the last whitespace-separated token parses as a timezone, strip
    # it off and parse the rest as the date proper
    offset, date = parsetimezone(string.split()[-1]), string
    if offset is not None:
        date = " ".join(string.split()[:-1])

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            # append the default value (and a matching directive) so
            # strptime still sees a complete date string; usenow picks
            # index 0 (biased) or 1 (today) of the defaults pair
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1474 1494
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        # already parsed
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates, in English or the user's locale
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # fast path: "unixtime offset" as stored in changesets
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if abs(when) > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if when < 0:
        raise Abort(_('negative date value: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1553 1573
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # earliest timestamp the spec can mean: round missing fields down
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # latest timestamp the spec can mean: round missing fields up,
        # trying month lengths 31/30/29 before settling on 28
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # "-N": within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_('%s must be nonnegative (see "hg help dates")')
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # inclusive range between two specs
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # bare date: match anywhere within its span of accuracy
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1629 1649
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[3:]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[8:]
    return 'literal', pattern, pattern.__eq__
1668 1688
def shortuser(user):
    """Return a short representation of a user name or email address."""
    user = user.partition('@')[0]       # strip the domain
    head, sep, tail = user.partition('<')
    if sep:                             # drop a real-name prefix
        user = tail
    user = user.partition(' ')[0]       # first word only
    user = user.partition('.')[0]       # and only up to the first dot
    return user
1684 1704
def emailuser(user):
    """Return the user portion of an email address."""
    user = user.partition('@')[0]       # strip the domain
    head, sep, tail = user.partition('<')
    if sep:                             # drop a real-name prefix
        user = tail
    return user
1694 1714
def email(author):
    '''get email of author.'''
    # take the text between '<' and '>'; with no brackets this reduces
    # to the whole string (find('<') == -1, so the slice starts at 0)
    end = author.find('>')
    if end < 0:
        end = None
    start = author.find('<') + 1
    return author[start:end]
1701 1721
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # column-aware truncation lives in encoding.trim; '...' marks the cut
    marker = '...'
    return encoding.trim(text, maxlength, ellipsis=marker)
1705 1725
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # the first row whose threshold (divisor * multiplier) is met wins
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        # below every threshold: fall back to the last (smallest) unit
        return unittable[-1][2] % count

    return go
1716 1736
# render a byte count with the largest fitting binary unit; the scaled
# precision (0/1/2 decimals at >=100/>=10/>=1 of a unit) keeps roughly
# three significant digits
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
1729 1749
def uirepr(s):
    """repr() variant for user-facing output.

    Collapses the doubled backslashes repr() produces so Windows paths
    remain readable.
    """
    # Avoid double backslash in Windows path repr()
    rep = repr(s)
    return rep.replace('\\\\', '\\')
1733 1753
# delay import of textwrap
def MBTextWrapper(**kwargs):
    """Factory for a display-column-aware TextWrapper (see class docstring)."""
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr just before the character that would overflow
            # space_left display columns; returns (fitting, remainder)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # memoize: rebind the module-level name to the class itself, so later
    # MBTextWrapper(...) calls construct tw directly instead of
    # redefining the class on every call
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
1837 1857
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width display columns, encoding-aware.

    initindent prefixes the first output line, hangindent all later ones.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # work in unicode space so wide characters are measured correctly
    decode = lambda s: s.decode(encoding.encoding, encoding.encodingmode)
    line = decode(line)
    initindent = decode(initindent)
    hangindent = decode(hangindent)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
1850 1870
def iterlines(iterator):
    """Yield individual lines from an iterator of multi-line chunks."""
    for block in iterator:
        for line in block.splitlines():
            yield line
1855 1875
def expandpath(path):
    """Expand environment variables, then ~ constructs, in path."""
    expanded = os.path.expandvars(path)
    return os.path.expanduser(expanded)
1858 1878
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    # frozen (bundled) builds run straight from the current executable
    return [sys.executable] if mainfrozen() else gethgcmd()
1869 1889
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # reap the child and record the os.wait() result
        # NOTE(review): os.wait() returns a (pid, status) pair, so the
        # ``pid in terminated`` test below compares an int against
        # tuples and can never be True; liveness detection effectively
        # relies on testpid() alone -- confirm intent
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # condfn() is re-checked after the liveness test to close the
            # race where the child satisfies the condition and exits
            # between the two calls
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # always restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
1904 1924
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.

    Note: when escape_prefix is set, the caller's mapping is mutated to add
    the prefix-maps-to-itself entry.
    """
    fn = fn or (lambda s: s)
    # keys are interpolated verbatim into the pattern; they are assumed to
    # be regex-safe (single identifiers in practice)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        patterns += '|' + prefix
        if len(prefix) > 1:
            prefix_char = prefix[1:]
        else:
            prefix_char = prefix
        # doubled prefix escapes to a single literal prefix character
        mapping[prefix_char] = prefix_char
    r = remod.compile(r'%s(%s)' % (prefix, patterns))
    return r.sub(lambda x: fn(mapping[x.group()[1:]]), s)
1929 1949
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not a plain number: fall back to a service-name lookup
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
1946 1966
# recognized spellings for boolean config values, lowercased
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
1957 1977
# precomputed table mapping every two-hex-digit string (in any case mix)
# to its byte value; used by _urlunquote below
_hexdig = '0123456789ABCDEFabcdef'
_hextochr = dict((a + b, chr(int(a + b, 16)))
                 for a in _hexdig for b in _hexdig)
1961 1981
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid two-digit escape: keep the literal '%'
            s += '%' + item
        except UnicodeDecodeError:
            # unicode input (Python 2): decode the escape manually
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
1981 2001
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>
    """

    # characters that are not escaped when quoting user/password parts
    _safechars = "!~*'()+"
    # characters that are not escaped when quoting path/fragment parts
    _safepchars = "/!~*'()+:\\"
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        # _localpath: no scheme was recognized, the whole string is a path
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)
            if not path:
                path = None

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            # rsplit so '@' may appear inside the user/password part
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urllib.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urllib.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urllib.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urllib.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urllib.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urllib.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        # return the URL without credentials, plus the credential tuple
        # (or None) in the shape urllib2's password manager expects
        user, passwd = self.user, self.passwd
        try:
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        '''whether this URL is absolute (remote, drive letter, UNC, rooted)'''
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2268 2288
def hasscheme(path):
    """Report whether path parses with a URL scheme component."""
    u = url(path)
    return bool(u.scheme)
2271 2291
def hasdriveletter(path):
    """True if path starts with a Windows drive letter ('c:', 'D:', ...)."""
    if not path:
        # preserve the falsy input itself, mirroring the original
        # 'path and ...' short-circuit
        return path
    return path[1:2] == ':' and path[0:1].isalpha()
2274 2294
def urllocalpath(path):
    """Parse path without query/fragment splitting and return its local path."""
    u = url(path, parsequery=False, parsefragment=False)
    return u.localpath()
2277 2297
def hidepassword(u):
    '''hide user credential in a url string'''
    parsed = url(u)
    if parsed.passwd:
        # mask rather than drop, so the URL shape stays recognizable
        parsed.passwd = '***'
    return str(parsed)
2284 2304
def removeauth(u):
    '''remove all authentication information from a url string'''
    parsed = url(u)
    parsed.user = None
    parsed.passwd = None
    return str(parsed)
2290 2310
def isatty(fd):
    """Return fd.isatty(), or False for objects lacking an isatty method."""
    isattyfn = getattr(fd, 'isatty', None)
    if isattyfn is None:
        return False
    return isattyfn()
2296 2316
# table consumed by unitcountfn (defined earlier in this file) to
# pretty-print a duration in seconds at a suitable unit and precision,
# from whole seconds down to nanoseconds
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2312 2332
# current indentation level shared by every timed() wrapper, so nested
# timed calls are visually indented in the stderr report
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2339 2359
# unit suffix -> multiplier; 'b' must come last because every two-letter
# suffix also ends in 'b'
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    value = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if value.endswith(suffix):
                return int(float(value[:-len(suffix)]) * multiplier)
        return int(value)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2361 2381
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # (source, callable) pairs; sorted lazily at call time
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort by source name so invocation order is deterministic
        self._hooks.sort(key=lambda pair: pair[0])
        return [hook(*args) for _source, hook in self._hooks]
2379 2399
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and do intentionally not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the other stream first so the two outputs interleave sanely
        otherf.flush()
    f.write('%s at:\n' % msg)
    frames = traceback.extract_stack()[:-skip - 1]
    entries = []
    for fn, ln, func, _text in frames:
        entries.append(('%s:%s' % (fn, ln), func))
    if entries:
        # pad file:line so the function names line up in a column
        width = max(len(location) for location, _func in entries)
        for location, func in entries:
            f.write(' %-*s in %s\n' % (width, location, func))
    f.flush()
2396 2416
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked paths living beneath it
        self._dirs = {}
        addpath = self.addpath
        # dirstate-style mapping: values are tuples whose first element is
        # a state character, and entries in state 'skip' are excluded
        if safehasattr(map, 'iteritems') and skip is not None:
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # finddirs yields deepest-first: once a directory is
                # already counted, all its ancestors are too, so bump
                # the count and stop
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # mirror of addpath: a directory that stays referenced
                # implies all its ancestors stay referenced
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2432 2452
# prefer the C implementation from the parsers module when available
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2435 2455
def finddirs(path):
    '''yield the ancestor directories of path, from deepest to shallowest'''
    sep = path.rfind('/')
    while sep >= 0:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2441 2461
2442 2462 # compression utility
2443 2463
class nocompress(object):
    """Pass-through 'compressor' used when no compression is requested."""

    def compress(self, x):
        # identity: hand each chunk back untouched
        return x

    def flush(self):
        # nothing is buffered, so there is nothing left to emit
        return ""
2449 2469
# compression type -> zero-argument factory returning an object with
# compress()/flush() methods
compressors = {
    None: nocompress,
    # lambda to prevent early import
    'BZ': lambda: bz2.BZ2Compressor(),
    'GZ': lambda: zlib.compressobj(),
    }
# also support the old form by courtesies
compressors['UN'] = compressors[None]
2458 2478
def _makedecompressor(decompcls):
    """Build a function mapping a file object to a decompressed chunkbuffer.

    decompcls is a zero-argument factory producing an object with a
    decompress() method (e.g. a zlib or bz2 decompressor).
    """
    def func(fh):
        def generator():
            d = decompcls()
            for chunk in filechunkiter(fh):
                yield d.decompress(chunk)
        return chunkbuffer(generator())
    return func
2467 2487
def _bz2():
    """Return a BZ2 decompressor primed for a header-stripped stream."""
    d = bz2.BZ2Decompressor()
    # Bzip2 stream start with BZ, but we stripped it.
    # we put it back for good measure.
    d.decompress('BZ')
    return d
2474 2494
# compression type -> function taking a file object and returning a
# chunkbuffer of decompressed data
decompressors = {None: lambda fh: fh,
                 '_truncatedBZ': _makedecompressor(_bz2),
                 'BZ': _makedecompressor(lambda: bz2.BZ2Decompressor()),
                 'GZ': _makedecompressor(lambda: zlib.decompressobj()),
                 }
# also support the old form by courtesies
decompressors['UN'] = decompressors[None]
2482 2502
# convenient shortcut: 'dst' aliases debugstacktrace for interactive use
dst = debugstacktrace
@@ -1,211 +1,210
1 1 #require test-repo
2 2
3 3 $ cd "$TESTDIR"/..
4 4
5 5 $ hg files 'set:(**.py)' | xargs python contrib/check-py3-compat.py
6 6 contrib/casesmash.py not using absolute_import
7 7 contrib/check-code.py not using absolute_import
8 8 contrib/check-code.py requires print_function
9 9 contrib/check-config.py not using absolute_import
10 10 contrib/check-config.py requires print_function
11 11 contrib/debugcmdserver.py not using absolute_import
12 12 contrib/debugcmdserver.py requires print_function
13 13 contrib/debugshell.py not using absolute_import
14 14 contrib/fixpax.py not using absolute_import
15 15 contrib/fixpax.py requires print_function
16 16 contrib/hgclient.py not using absolute_import
17 17 contrib/hgclient.py requires print_function
18 18 contrib/hgfixes/fix_bytes.py not using absolute_import
19 19 contrib/hgfixes/fix_bytesmod.py not using absolute_import
20 20 contrib/hgfixes/fix_leftover_imports.py not using absolute_import
21 21 contrib/import-checker.py not using absolute_import
22 22 contrib/import-checker.py requires print_function
23 23 contrib/memory.py not using absolute_import
24 24 contrib/perf.py not using absolute_import
25 25 contrib/python-hook-examples.py not using absolute_import
26 26 contrib/revsetbenchmarks.py not using absolute_import
27 27 contrib/revsetbenchmarks.py requires print_function
28 28 contrib/showstack.py not using absolute_import
29 29 contrib/synthrepo.py not using absolute_import
30 30 contrib/win32/hgwebdir_wsgi.py not using absolute_import
31 31 doc/check-seclevel.py not using absolute_import
32 32 doc/gendoc.py not using absolute_import
33 33 doc/hgmanpage.py not using absolute_import
34 34 hgext/__init__.py not using absolute_import
35 35 hgext/acl.py not using absolute_import
36 36 hgext/blackbox.py not using absolute_import
37 37 hgext/bugzilla.py not using absolute_import
38 38 hgext/censor.py not using absolute_import
39 39 hgext/children.py not using absolute_import
40 40 hgext/churn.py not using absolute_import
41 41 hgext/clonebundles.py not using absolute_import
42 42 hgext/color.py not using absolute_import
43 43 hgext/convert/__init__.py not using absolute_import
44 44 hgext/convert/bzr.py not using absolute_import
45 45 hgext/convert/common.py not using absolute_import
46 46 hgext/convert/convcmd.py not using absolute_import
47 47 hgext/convert/cvs.py not using absolute_import
48 48 hgext/convert/cvsps.py not using absolute_import
49 49 hgext/convert/darcs.py not using absolute_import
50 50 hgext/convert/filemap.py not using absolute_import
51 51 hgext/convert/git.py not using absolute_import
52 52 hgext/convert/gnuarch.py not using absolute_import
53 53 hgext/convert/hg.py not using absolute_import
54 54 hgext/convert/monotone.py not using absolute_import
55 55 hgext/convert/p4.py not using absolute_import
56 56 hgext/convert/subversion.py not using absolute_import
57 57 hgext/convert/transport.py not using absolute_import
58 58 hgext/eol.py not using absolute_import
59 59 hgext/extdiff.py not using absolute_import
60 60 hgext/factotum.py not using absolute_import
61 61 hgext/fetch.py not using absolute_import
62 62 hgext/gpg.py not using absolute_import
63 63 hgext/graphlog.py not using absolute_import
64 64 hgext/hgcia.py not using absolute_import
65 65 hgext/hgk.py not using absolute_import
66 66 hgext/highlight/__init__.py not using absolute_import
67 67 hgext/highlight/highlight.py not using absolute_import
68 68 hgext/histedit.py not using absolute_import
69 69 hgext/keyword.py not using absolute_import
70 70 hgext/largefiles/__init__.py not using absolute_import
71 71 hgext/largefiles/basestore.py not using absolute_import
72 72 hgext/largefiles/lfcommands.py not using absolute_import
73 73 hgext/largefiles/lfutil.py not using absolute_import
74 74 hgext/largefiles/localstore.py not using absolute_import
75 75 hgext/largefiles/overrides.py not using absolute_import
76 76 hgext/largefiles/proto.py not using absolute_import
77 77 hgext/largefiles/remotestore.py not using absolute_import
78 78 hgext/largefiles/reposetup.py not using absolute_import
79 79 hgext/largefiles/uisetup.py not using absolute_import
80 80 hgext/largefiles/wirestore.py not using absolute_import
81 81 hgext/mq.py not using absolute_import
82 82 hgext/notify.py not using absolute_import
83 83 hgext/pager.py not using absolute_import
84 84 hgext/patchbomb.py not using absolute_import
85 85 hgext/purge.py not using absolute_import
86 86 hgext/rebase.py not using absolute_import
87 87 hgext/record.py not using absolute_import
88 88 hgext/relink.py not using absolute_import
89 89 hgext/schemes.py not using absolute_import
90 90 hgext/share.py not using absolute_import
91 91 hgext/shelve.py not using absolute_import
92 92 hgext/strip.py not using absolute_import
93 93 hgext/transplant.py not using absolute_import
94 94 hgext/win32mbcs.py not using absolute_import
95 95 hgext/win32text.py not using absolute_import
96 96 hgext/zeroconf/Zeroconf.py not using absolute_import
97 97 hgext/zeroconf/Zeroconf.py requires print_function
98 98 hgext/zeroconf/__init__.py not using absolute_import
99 99 i18n/check-translation.py not using absolute_import
100 100 i18n/polib.py not using absolute_import
101 101 mercurial/byterange.py not using absolute_import
102 102 mercurial/cmdutil.py not using absolute_import
103 103 mercurial/commands.py not using absolute_import
104 104 mercurial/context.py not using absolute_import
105 105 mercurial/dirstate.py not using absolute_import
106 106 mercurial/dispatch.py requires print_function
107 107 mercurial/exchange.py not using absolute_import
108 108 mercurial/help.py not using absolute_import
109 109 mercurial/httpclient/__init__.py not using absolute_import
110 110 mercurial/httpclient/_readers.py not using absolute_import
111 111 mercurial/httpclient/socketutil.py not using absolute_import
112 112 mercurial/httpconnection.py not using absolute_import
113 113 mercurial/keepalive.py not using absolute_import
114 114 mercurial/keepalive.py requires print_function
115 115 mercurial/localrepo.py not using absolute_import
116 116 mercurial/lsprof.py requires print_function
117 117 mercurial/lsprofcalltree.py not using absolute_import
118 118 mercurial/lsprofcalltree.py requires print_function
119 119 mercurial/mail.py requires print_function
120 120 mercurial/manifest.py not using absolute_import
121 121 mercurial/mdiff.py not using absolute_import
122 122 mercurial/patch.py not using absolute_import
123 123 mercurial/pvec.py not using absolute_import
124 124 mercurial/py3kcompat.py not using absolute_import
125 125 mercurial/revlog.py not using absolute_import
126 126 mercurial/scmposix.py not using absolute_import
127 127 mercurial/scmutil.py not using absolute_import
128 128 mercurial/scmwindows.py not using absolute_import
129 129 mercurial/similar.py not using absolute_import
130 130 mercurial/store.py not using absolute_import
131 mercurial/util.py not using absolute_import
132 131 mercurial/windows.py not using absolute_import
133 132 setup.py not using absolute_import
134 133 tests/filterpyflakes.py requires print_function
135 134 tests/generate-working-copy-states.py requires print_function
136 135 tests/get-with-headers.py requires print_function
137 136 tests/heredoctest.py requires print_function
138 137 tests/hypothesishelpers.py not using absolute_import
139 138 tests/hypothesishelpers.py requires print_function
140 139 tests/killdaemons.py not using absolute_import
141 140 tests/md5sum.py not using absolute_import
142 141 tests/mockblackbox.py not using absolute_import
143 142 tests/printenv.py not using absolute_import
144 143 tests/readlink.py not using absolute_import
145 144 tests/readlink.py requires print_function
146 145 tests/revlog-formatv0.py not using absolute_import
147 146 tests/run-tests.py not using absolute_import
148 147 tests/seq.py not using absolute_import
149 148 tests/seq.py requires print_function
150 149 tests/silenttestrunner.py not using absolute_import
151 150 tests/silenttestrunner.py requires print_function
152 151 tests/sitecustomize.py not using absolute_import
153 152 tests/svn-safe-append.py not using absolute_import
154 153 tests/svnxml.py not using absolute_import
155 154 tests/test-ancestor.py requires print_function
156 155 tests/test-atomictempfile.py not using absolute_import
157 156 tests/test-batching.py not using absolute_import
158 157 tests/test-batching.py requires print_function
159 158 tests/test-bdiff.py not using absolute_import
160 159 tests/test-bdiff.py requires print_function
161 160 tests/test-context.py not using absolute_import
162 161 tests/test-context.py requires print_function
163 162 tests/test-demandimport.py not using absolute_import
164 163 tests/test-demandimport.py requires print_function
165 164 tests/test-dispatch.py not using absolute_import
166 165 tests/test-dispatch.py requires print_function
167 166 tests/test-doctest.py not using absolute_import
168 167 tests/test-duplicateoptions.py not using absolute_import
169 168 tests/test-duplicateoptions.py requires print_function
170 169 tests/test-filecache.py not using absolute_import
171 170 tests/test-filecache.py requires print_function
172 171 tests/test-filelog.py not using absolute_import
173 172 tests/test-filelog.py requires print_function
174 173 tests/test-hg-parseurl.py not using absolute_import
175 174 tests/test-hg-parseurl.py requires print_function
176 175 tests/test-hgweb-auth.py not using absolute_import
177 176 tests/test-hgweb-auth.py requires print_function
178 177 tests/test-hgwebdir-paths.py not using absolute_import
179 178 tests/test-hybridencode.py not using absolute_import
180 179 tests/test-hybridencode.py requires print_function
181 180 tests/test-lrucachedict.py not using absolute_import
182 181 tests/test-lrucachedict.py requires print_function
183 182 tests/test-manifest.py not using absolute_import
184 183 tests/test-minirst.py not using absolute_import
185 184 tests/test-minirst.py requires print_function
186 185 tests/test-parseindex2.py not using absolute_import
187 186 tests/test-parseindex2.py requires print_function
188 187 tests/test-pathencode.py not using absolute_import
189 188 tests/test-pathencode.py requires print_function
190 189 tests/test-propertycache.py not using absolute_import
191 190 tests/test-propertycache.py requires print_function
192 191 tests/test-revlog-ancestry.py not using absolute_import
193 192 tests/test-revlog-ancestry.py requires print_function
194 193 tests/test-run-tests.py not using absolute_import
195 194 tests/test-simplemerge.py not using absolute_import
196 195 tests/test-status-inprocess.py not using absolute_import
197 196 tests/test-status-inprocess.py requires print_function
198 197 tests/test-symlink-os-yes-fs-no.py not using absolute_import
199 198 tests/test-trusted.py not using absolute_import
200 199 tests/test-trusted.py requires print_function
201 200 tests/test-ui-color.py not using absolute_import
202 201 tests/test-ui-color.py requires print_function
203 202 tests/test-ui-config.py not using absolute_import
204 203 tests/test-ui-config.py requires print_function
205 204 tests/test-ui-verbosity.py not using absolute_import
206 205 tests/test-ui-verbosity.py requires print_function
207 206 tests/test-url.py not using absolute_import
208 207 tests/test-url.py requires print_function
209 208 tests/test-walkrepo.py requires print_function
210 209 tests/test-wireproto.py requires print_function
211 210 tests/tinyproxy.py requires print_function
General Comments 0
You need to be logged in to leave comments. Login now