##// END OF EJS Templates
util: use string.hexdigits instead of defining it ourselves...
Augie Fackler -
r30054:8b89521a default
parent child Browse files
Show More
@@ -1,2918 +1,2918 b''
1 1 # util.py - Mercurial utility functions and platform specific implementations
2 2 #
3 3 # Copyright 2005 K. Thananchayan <thananck@yahoo.com>
4 4 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
5 5 # Copyright 2006 Vadim Gelfer <vadim.gelfer@gmail.com>
6 6 #
7 7 # This software may be used and distributed according to the terms of the
8 8 # GNU General Public License version 2 or any later version.
9 9
10 10 """Mercurial utility functions and platform specific implementations.
11 11
12 12 This contains helper routines that are independent of the SCM core and
13 13 hide platform-specific details from the core.
14 14 """
15 15
16 16 from __future__ import absolute_import
17 17
18 18 import bz2
19 19 import calendar
20 20 import collections
21 21 import datetime
22 22 import errno
23 23 import gc
24 24 import hashlib
25 25 import imp
26 26 import os
27 27 import re as remod
28 28 import shutil
29 29 import signal
30 30 import socket
31 import string
31 32 import subprocess
32 33 import sys
33 34 import tempfile
34 35 import textwrap
35 36 import time
36 37 import traceback
37 38 import zlib
38 39
39 40 from . import (
40 41 encoding,
41 42 error,
42 43 i18n,
43 44 osutil,
44 45 parsers,
45 46 pycompat,
46 47 )
47 48
# Re-export Python 2/3 compatibility aliases from pycompat into this
# module's namespace, so callers can keep using e.g. util.queue.
for attr in (
    'empty',
    'httplib',
    'httpserver',
    'pickle',
    'queue',
    'urlerr',
    'urlparse',
    # we do import urlreq, but we do it outside the loop
    #'urlreq',
    'stringio',
    'socketserver',
    'xmlrpclib',
):
    globals()[attr] = getattr(pycompat, attr)

# This line is to make pyflakes happy:
urlreq = pycompat.urlreq
66 67
# Pick the platform-specific implementation module and re-export its
# entry points under stable, platform-independent names.
if os.name == 'nt':
    from . import windows as platform
else:
    from . import posix as platform

# gettext shorthand
_ = i18n._

bindunixsocket = platform.bindunixsocket
cachestat = platform.cachestat
checkexec = platform.checkexec
checklink = platform.checklink
copymode = platform.copymode
executablepath = platform.executablepath
expandglobs = platform.expandglobs
explainexit = platform.explainexit
findexe = platform.findexe
gethgcmd = platform.gethgcmd
getuser = platform.getuser
getpid = os.getpid
groupmembers = platform.groupmembers
groupname = platform.groupname
hidewindow = platform.hidewindow
isexec = platform.isexec
isowner = platform.isowner
localpath = platform.localpath
lookupreg = platform.lookupreg
makedir = platform.makedir
nlinks = platform.nlinks
normpath = platform.normpath
normcase = platform.normcase
normcasespec = platform.normcasespec
normcasefallback = platform.normcasefallback
openhardlinks = platform.openhardlinks
oslink = platform.oslink
parsepatchoutput = platform.parsepatchoutput
pconvert = platform.pconvert
poll = platform.poll
popen = platform.popen
posixfile = platform.posixfile
quotecommand = platform.quotecommand
readpipe = platform.readpipe
rename = platform.rename
removedirs = platform.removedirs
samedevice = platform.samedevice
samefile = platform.samefile
samestat = platform.samestat
setbinary = platform.setbinary
setflags = platform.setflags
setsignalhandler = platform.setsignalhandler
shellquote = platform.shellquote
spawndetached = platform.spawndetached
split = platform.split
sshargs = platform.sshargs
# prefer the C implementation from osutil when it is available
statfiles = getattr(osutil, 'statfiles', platform.statfiles)
statisexec = platform.statisexec
statislink = platform.statislink
termwidth = platform.termwidth
testpid = platform.testpid
umask = platform.umask
unlink = platform.unlink
unlinkpath = platform.unlinkpath
username = platform.username
129 130
# Python compatibility

# unique sentinel for "no value" — None may be a legitimate value
_notset = object()

# disable Python's problematic floating point timestamps (issue4836)
# (Python hypocritically says you shouldn't change this behavior in
# libraries, and sure enough Mercurial is not a library.)
os.stat_float_times(False)
138 139
def safehasattr(thing, attr):
    """True if 'thing' has attribute 'attr' (hasattr without its
    exception-swallowing: getattr against a private sentinel)."""
    sentinel = _notset
    return getattr(thing, attr, sentinel) is not sentinel
141 142
DIGESTS = {
    'md5': hashlib.md5,
    'sha1': hashlib.sha1,
    'sha512': hashlib.sha512,
}
# List of digest types from strongest to weakest
DIGESTS_BY_STRENGTH = ['sha512', 'sha1', 'md5']

for k in DIGESTS_BY_STRENGTH:
    assert k in DIGESTS

class digester(object):
    """helper to compute digests.

    This helper can be used to compute one or more digests given their name.

    >>> d = digester(['md5', 'sha1'])
    >>> d.update('foo')
    >>> [k for k in sorted(d)]
    ['md5', 'sha1']
    >>> d['md5']
    'acbd18db4cc2f85cedef654fccc4a4d8'
    >>> d['sha1']
    '0beec7b5ea3f0fdbc95d0dd47f3c5bc275da8a33'
    >>> digester.preferred(['md5', 'sha1'])
    'sha1'
    """

    def __init__(self, digests, s=''):
        self._hashes = {}
        for k in digests:
            if k not in DIGESTS:
                raise Abort(_('unknown digest type: %s') % k)
            self._hashes[k] = DIGESTS[k]()
        if s:
            self.update(s)

    def update(self, data):
        """feed 'data' to every requested digest"""
        for h in self._hashes.values():
            h.update(data)

    def __getitem__(self, key):
        if key not in DIGESTS:
            # fix: report the requested key; the old code interpolated the
            # unrelated module-level loop variable 'k' (a latent NameError)
            raise Abort(_('unknown digest type: %s') % key)
        return self._hashes[key].hexdigest()

    def __iter__(self):
        return iter(self._hashes)

    @staticmethod
    def preferred(supported):
        """returns the strongest digest type in both supported and DIGESTS."""

        for k in DIGESTS_BY_STRENGTH:
            if k in supported:
                return k
        return None
199 200
class digestchecker(object):
    """file handle wrapper that additionally checks content against a given
    size and digests.

    d = digestchecker(fh, size, {'md5': '...'})

    When multiple digests are given, all of them are validated.
    """

    def __init__(self, fh, size, digests):
        self._fh = fh
        self._size = size
        self._got = 0
        self._digests = dict(digests)
        self._digester = digester(self._digests.keys())

    def read(self, length=-1):
        # feed everything we hand out to the digester and count bytes
        data = self._fh.read(length)
        self._digester.update(data)
        self._got += len(data)
        return data

    def validate(self):
        """raise Abort unless exactly 'size' bytes with the expected
        digests were read"""
        if self._got != self._size:
            raise Abort(_('size mismatch: expected %d, got %d') %
                        (self._size, self._got))
        for name, expected in self._digests.items():
            actual = self._digester[name]
            if expected != actual:
                # i18n: first parameter is a digest name
                raise Abort(_('%s mismatch: expected %s, got %s') %
                            (name, expected, actual))
231 232
# Provide a 'buffer' function everywhere: the builtin on Python 2, a
# slice-copy shim where it is missing, and a zero-copy memoryview slice
# on Python 3.
try:
    buffer = buffer
except NameError:
    if not pycompat.ispy3:
        def buffer(sliceable, offset=0):
            # plain copy of the tail; matches buffer() semantics closely enough
            return sliceable[offset:]
    else:
        def buffer(sliceable, offset=0):
            # zero-copy view of the tail
            return memoryview(sliceable)[offset:]

# whether to ask subprocess to close inherited fds (POSIX only)
closefds = os.name == 'posix'

# read granularity for bufferedinputpipe._fillbuffer
_chunksize = 4096
245 246
class bufferedinputpipe(object):
    """a manually buffered input pipe

    Python will not let us use buffered IO and lazy reading with 'polling' at
    the same time. We cannot probe the buffer state and select will not detect
    that data are ready to read if they are already buffered.

    This class let us work around that by implementing its own buffering
    (allowing efficient readline) while offering a way to know if the buffer is
    empty from the output (allowing collaboration of the buffer with polling).

    This class lives in the 'util' module because it makes use of the 'os'
    module from the python stdlib.
    """

    def __init__(self, input):
        # 'input' is the underlying unbuffered pipe-like object
        self._input = input
        self._buffer = []   # pending chunks, oldest first
        self._eof = False
        self._lenbuf = 0    # total number of buffered bytes

    @property
    def hasbuffer(self):
        """True is any data is currently buffered

        This will be used externally a pre-step for polling IO. If there is
        already data then no polling should be set in place."""
        return bool(self._buffer)

    @property
    def closed(self):
        return self._input.closed

    def fileno(self):
        return self._input.fileno()

    def close(self):
        return self._input.close()

    def read(self, size):
        # keep filling until we can satisfy the request or hit EOF
        while (not self._eof) and (self._lenbuf < size):
            self._fillbuffer()
        return self._frombuffer(size)

    def readline(self, *args, **kwargs):
        if 1 < len(self._buffer):
            # this should not happen because both read and readline end with a
            # _frombuffer call that collapse it.
            self._buffer = [''.join(self._buffer)]
            self._lenbuf = len(self._buffer[0])
        # lfi: index of the newline within the most recent chunk, or -1
        lfi = -1
        if self._buffer:
            lfi = self._buffer[-1].find('\n')
        while (not self._eof) and lfi < 0:
            self._fillbuffer()
            if self._buffer:
                lfi = self._buffer[-1].find('\n')
        size = lfi + 1
        if lfi < 0: # end of file
            size = self._lenbuf
        elif 1 < len(self._buffer):
            # we need to take previous chunks into account
            size += self._lenbuf - len(self._buffer[-1])
        return self._frombuffer(size)

    def _frombuffer(self, size):
        """return at most 'size' data from the buffer

        The data are removed from the buffer."""
        if size == 0 or not self._buffer:
            return ''
        buf = self._buffer[0]
        if 1 < len(self._buffer):
            # collapse all pending chunks into a single string
            buf = ''.join(self._buffer)

        data = buf[:size]
        buf = buf[len(data):]
        if buf:
            self._buffer = [buf]
            self._lenbuf = len(buf)
        else:
            self._buffer = []
            self._lenbuf = 0
        return data

    def _fillbuffer(self):
        """read data to the buffer"""
        # os.read so we bypass any stdio-level buffering on the pipe
        data = os.read(self._input.fileno(), _chunksize)
        if not data:
            self._eof = True
        else:
            self._lenbuf += len(data)
            self._buffer.append(data)
339 340
def popen2(cmd, env=None, newlines=False):
    """run 'cmd' in a shell and return its (stdin, stdout) pipe pair"""
    # Setting bufsize to -1 lets the system decide the buffer size.
    # The default for bufsize is 0, meaning unbuffered. This leads to
    # poor performance on Mac OS X: http://bugs.python.org/issue4194
    proc = subprocess.Popen(cmd, shell=True, bufsize=-1,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout
350 351
def popen3(cmd, env=None, newlines=False):
    """like popen4, but without exposing the Popen object itself"""
    return popen4(cmd, env, newlines)[:3]
354 355
def popen4(cmd, env=None, newlines=False, bufsize=-1):
    """run 'cmd' in a shell; return (stdin, stdout, stderr, Popen)"""
    proc = subprocess.Popen(cmd, shell=True, bufsize=bufsize,
                            close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE,
                            universal_newlines=newlines,
                            env=env)
    return proc.stdin, proc.stdout, proc.stderr, proc
363 364
def version():
    """Return version information if available."""
    try:
        # __version__ is generated at build time; a plain source checkout
        # may not have it
        from . import __version__
        return __version__.version
    except ImportError:
        return 'unknown'
371 372
def versiontuple(v=None, n=4):
    """Parses a Mercurial version string into an N-tuple.

    The version string to be parsed is specified with the ``v`` argument.
    If it isn't defined, the current Mercurial version string will be parsed.

    ``n`` can be 2, 3, or 4. Here is how some version strings map to
    returned values:

    >>> v = '3.6.1+190-df9b73d2d444'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, 1)
    >>> versiontuple(v, 4)
    (3, 6, 1, '190-df9b73d2d444')

    >>> versiontuple('3.6.1+190-df9b73d2d444+20151118')
    (3, 6, 1, '190-df9b73d2d444+20151118')

    >>> v = '3.6'
    >>> versiontuple(v, 2)
    (3, 6)
    >>> versiontuple(v, 3)
    (3, 6, None)
    >>> versiontuple(v, 4)
    (3, 6, None, None)

    >>> v = '3.9-rc'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc')

    >>> v = '3.9-rc+2-02a8fea4289b'
    >>> versiontuple(v, 2)
    (3, 9)
    >>> versiontuple(v, 3)
    (3, 9, None)
    >>> versiontuple(v, 4)
    (3, 9, None, 'rc+2-02a8fea4289b')
    """
    if not v:
        v = version()
    # split the dotted version from build/rc metadata at the first '+' or
    # '-'; use a raw string — the old '[\+-]' relied on the invalid escape
    # sequence '\+', which is deprecated in Python 3
    parts = remod.split(r'[+-]', v, 1)
    if len(parts) == 1:
        vparts, extra = parts[0], None
    else:
        vparts, extra = parts

    vints = []
    for i in vparts.split('.'):
        try:
            vints.append(int(i))
        except ValueError:
            # stop at the first non-numeric component
            break
    # (3, 6) -> (3, 6, None)
    while len(vints) < 3:
        vints.append(None)

    if n == 2:
        return (vints[0], vints[1])
    if n == 3:
        return (vints[0], vints[1], vints[2])
    if n == 4:
        return (vints[0], vints[1], vints[2], extra)
440 441
# used by parsedate
# Formats are tried in order, so the more precise/common ones come first.
defaultdateformats = (
    '%Y-%m-%dT%H:%M:%S', # the 'real' ISO8601
    '%Y-%m-%dT%H:%M',    #   without seconds
    '%Y-%m-%dT%H%M%S',   # another awful but legal variant without :
    '%Y-%m-%dT%H%M',     #   without seconds
    '%Y-%m-%d %H:%M:%S', # our common legal variant
    '%Y-%m-%d %H:%M',    #   without seconds
    '%Y-%m-%d %H%M%S',   # without :
    '%Y-%m-%d %H%M',     #   without seconds
    '%Y-%m-%d %I:%M:%S%p',
    '%Y-%m-%d %H:%M',
    '%Y-%m-%d %I:%M%p',
    '%Y-%m-%d',
    '%m-%d',
    '%m/%d',
    '%m/%d/%y',
    '%m/%d/%Y',
    '%a %b %d %H:%M:%S %Y',
    '%a %b %d %I:%M:%S%p %Y',
    '%a, %d %b %Y %H:%M:%S', #  GNU coreutils "/bin/date --rfc-2822"
    '%b %d %H:%M:%S %Y',
    '%b %d %I:%M:%S%p %Y',
    '%b %d %H:%M:%S',
    '%b %d %I:%M:%S%p',
    '%b %d %H:%M',
    '%b %d %I:%M%p',
    '%b %d %Y',
    '%b %d',
    '%H:%M:%S',
    '%I:%M:%S%p',
    '%H:%M',
    '%I:%M%p',
)

# additional coarse-grained formats accepted where a date *range* makes sense
extendeddateformats = defaultdateformats + (
    "%Y",
    "%Y-%m",
    "%b",
    "%b %Y",
)
482 483
def cachefunc(func):
    '''cache the result of function calls'''
    # XXX doesn't handle keywords args
    argcount = func.__code__.co_argcount
    if argcount == 0:
        # a zero-argument function has a single result; keep it in a list
        # so the closure can mutate it
        results = []
        def f():
            if not results:
                results.append(func())
            return results[0]
        return f
    memo = {}
    if argcount == 1:
        # we gain a small amount of time because
        # we don't need to pack/unpack the list
        def f(arg):
            if arg not in memo:
                memo[arg] = func(arg)
            return memo[arg]
    else:
        def f(*args):
            if args not in memo:
                memo[args] = func(*args)
            return memo[args]

    return f
508 509
class sortdict(dict):
    '''a simple sorted dictionary

    Iteration follows insertion order; re-assigning an existing key moves
    it to the end, and insert() can place a key at an arbitrary position.
    '''
    def __init__(self, data=None):
        # keys in iteration order
        self._list = []
        if data:
            self.update(data)
    def copy(self):
        return sortdict(self)
    def __setitem__(self, key, val):
        # re-setting a key moves it to the end of the iteration order
        if key in self:
            self._list.remove(key)
        self._list.append(key)
        dict.__setitem__(self, key, val)
    def __iter__(self):
        return self._list.__iter__()
    def update(self, src):
        if isinstance(src, dict):
            src = src.iteritems()
        for k, v in src:
            self[k] = v
    def clear(self):
        dict.clear(self)
        self._list = []
    def items(self):
        return [(k, self[k]) for k in self._list]
    def __delitem__(self, key):
        dict.__delitem__(self, key)
        self._list.remove(key)
    def pop(self, key, *args, **kwargs):
        # fix: return the removed value (or the supplied default) like
        # dict.pop(); the previous implementation discarded it and
        # always returned None
        value = dict.pop(self, key, *args, **kwargs)
        try:
            self._list.remove(key)
        except ValueError:
            # key was absent and a default was supplied
            pass
        return value
    def keys(self):
        return self._list
    def iterkeys(self):
        return self._list.__iter__()
    def iteritems(self):
        for k in self._list:
            yield k, self[k]
    def insert(self, index, key, val):
        '''add 'key' at position 'index' in the iteration order'''
        self._list.insert(index, key)
        dict.__setitem__(self, key, val)
    def __repr__(self):
        if not self:
            return '%s()' % self.__class__.__name__
        return '%s(%r)' % (self.__class__.__name__, self.items())
557 558
class _lrucachenode(object):
    """A node in a doubly linked list.

    Holds a reference to nodes on either side as well as a key-value
    pair for the dictionary entry.
    """
    # fixed slots: many nodes are allocated, avoid per-instance __dict__
    __slots__ = (u'next', u'prev', u'key', u'value')

    def __init__(self):
        self.next = None
        self.prev = None

        # key is _notset (not None) so that an empty node is distinguishable
        # from one legitimately caching the key None
        self.key = _notset
        self.value = None

    def markempty(self):
        """Mark the node as emptied."""
        self.key = _notset
576 577
class lrucachedict(object):
    """Dict that caches most recent accesses and sets.

    The dict consists of an actual backing dict - indexed by original
    key - and a doubly linked circular list defining the order of entries in
    the cache.

    The head node is the newest entry in the cache. If the cache is full,
    we recycle head.prev and make it the new head. Cache accesses result in
    the node being moved to before the existing head and being marked as the
    new head node.
    """
    def __init__(self, max):
        # max: maximum number of entries retained (capacity)
        self._cache = {}

        # start with a single self-linked node; more are added lazily by
        # _addcapacity() as entries are inserted, up to _capacity
        self._head = head = _lrucachenode()
        head.prev = head
        head.next = head
        self._size = 1
        self._capacity = max

    def __len__(self):
        return len(self._cache)

    def __contains__(self, k):
        return k in self._cache

    def __iter__(self):
        # We don't have to iterate in cache order, but why not.
        n = self._head
        for i in range(len(self._cache)):
            yield n.key
            n = n.next

    def __getitem__(self, k):
        node = self._cache[k]
        self._movetohead(node)
        return node.value

    def __setitem__(self, k, v):
        node = self._cache.get(k)
        # Replace existing value and mark as newest.
        if node is not None:
            node.value = v
            self._movetohead(node)
            return

        if self._size < self._capacity:
            node = self._addcapacity()
        else:
            # Grab the last/oldest item.
            node = self._head.prev

        # At capacity. Kill the old entry.
        if node.key is not _notset:
            del self._cache[node.key]

        node.key = k
        node.value = v
        self._cache[k] = node
        # And mark it as newest entry. No need to adjust order since it
        # is already self._head.prev.
        self._head = node

    def __delitem__(self, k):
        node = self._cache.pop(k)
        node.markempty()

        # Temporarily mark as newest item before re-adjusting head to make
        # this node the oldest item.
        self._movetohead(node)
        self._head = node.next

    # Additional dict methods.

    def get(self, k, default=None):
        try:
            return self._cache[k].value
        except KeyError:
            return default

    def clear(self):
        # empty every node but keep the allocated ring for reuse
        n = self._head
        while n.key is not _notset:
            n.markempty()
            n = n.next

        self._cache.clear()

    def copy(self):
        result = lrucachedict(self._capacity)
        n = self._head.prev
        # Iterate in oldest-to-newest order, so the copy has the right ordering
        for i in range(len(self._cache)):
            result[n.key] = n.value
            n = n.prev
        return result

    def _movetohead(self, node):
        """Mark a node as the newest, making it the new head.

        When a node is accessed, it becomes the freshest entry in the LRU
        list, which is denoted by self._head.

        Visually, let's make ``N`` the new head node (* denotes head):

            previous/oldest <-> head <-> next/next newest

            ----<->--- A* ---<->-----
            |                       |
            E <-> D <-> N <-> C <-> B

        To:

            ----<->--- N* ---<->-----
            |                       |
            E <-> D <-> C <-> B <-> A

        This requires the following moves:

           C.next = D  (node.prev.next = node.next)
           D.prev = C  (node.next.prev = node.prev)
           E.next = N  (head.prev.next = node)
           N.prev = E  (node.prev = head.prev)
           N.next = A  (node.next = head)
           A.prev = N  (head.prev = node)
        """
        head = self._head
        # C.next = D
        node.prev.next = node.next
        # D.prev = C
        node.next.prev = node.prev
        # N.prev = E
        node.prev = head.prev
        # N.next = A
        # It is tempting to do just "head" here, however if node is
        # adjacent to head, this will do bad things.
        node.next = head.prev.next
        # E.next = N
        node.next.prev = node
        # A.prev = N
        node.prev.next = node

        self._head = node

    def _addcapacity(self):
        """Add a node to the circular linked list.

        The new node is inserted before the head node.
        """
        head = self._head
        node = _lrucachenode()
        head.prev.next = node
        node.prev = head.prev
        node.next = head
        head.prev = node
        self._size += 1
        return node
735 736
def lrucachefunc(func):
    '''cache most recent results of function calls'''
    results = {}
    lru = collections.deque()   # keys, least recently used first
    if func.__code__.co_argcount == 1:
        def f(arg):
            if arg in results:
                # refresh: move the key to the most-recent end
                lru.remove(arg)
            else:
                if len(results) > 20:
                    del results[lru.popleft()]
                results[arg] = func(arg)
            lru.append(arg)
            return results[arg]
    else:
        def f(*args):
            if args in results:
                lru.remove(args)
            else:
                if len(results) > 20:
                    del results[lru.popleft()]
                results[args] = func(*args)
            lru.append(args)
            return results[args]

    return f
762 763
class propertycache(object):
    """Descriptor caching the decorated method's result in the instance.

    As a non-data descriptor, once the value lands in obj.__dict__ later
    attribute lookups bypass __get__ entirely."""
    def __init__(self, func):
        self.func = func
        self.name = func.__name__

    def __get__(self, obj, type=None):
        value = self.func(obj)
        self.cachevalue(obj, value)
        return value

    def cachevalue(self, obj, value):
        # __dict__ assignment required to bypass __setattr__ (eg: repoview)
        obj.__dict__[self.name] = value
775 776
def pipefilter(s, cmd):
    '''filter string S through command CMD, returning its output'''
    proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE)
    stdout, _stderr = proc.communicate(s)
    return stdout
782 783
def tempfilter(s, cmd):
    '''filter string S through a pair of temporary files with CMD.
    CMD is used as a template to create the real command to be run,
    with the strings INFILE and OUTFILE replaced by the real names of
    the temporary files generated.'''
    inname, outname = None, None
    try:
        # write the input to a temp file the command can read
        infd, inname = tempfile.mkstemp(prefix='hg-filter-in-')
        fp = os.fdopen(infd, 'wb')
        fp.write(s)
        fp.close()
        # reserve an output file name; the command itself writes it
        outfd, outname = tempfile.mkstemp(prefix='hg-filter-out-')
        os.close(outfd)
        cmd = cmd.replace('INFILE', inname)
        cmd = cmd.replace('OUTFILE', outname)
        code = os.system(cmd)
        # on OpenVMS an odd status means success
        if sys.platform == 'OpenVMS' and code & 1:
            code = 0
        if code:
            raise Abort(_("command '%s' failed: %s") %
                        (cmd, explainexit(code)))
        return readfile(outname)
    finally:
        # best-effort cleanup of both temp files
        try:
            if inname:
                os.unlink(inname)
        except OSError:
            pass
        try:
            if outname:
                os.unlink(outname)
        except OSError:
            pass
816 817
# maps filter-spec prefixes to their implementations; consulted by filter()
filtertable = {
    'tempfile:': tempfilter,
    'pipe:': pipefilter,
}
821 822
def filter(s, cmd):
    "filter a string through a command that transforms its input to its output"
    for prefix in filtertable:
        if cmd.startswith(prefix):
            rest = cmd[len(prefix):].lstrip()
            return filtertable[prefix](s, rest)
    # no explicit prefix: default to a plain pipe
    return pipefilter(s, cmd)
828 829
def binary(s):
    """return true if a string is binary data"""
    if not s:
        return False
    return '\0' in s
832 833
def increasingchunks(source, min=1024, max=65536):
    '''return no less than min bytes per chunk while data remains,
    doubling min after each chunk until it reaches max'''
    def _log2(x):
        # floor(log2(x)); 0 for x == 0 (only ever called with x >= min >= 1)
        if x == 0:
            return 0
        n = 0
        while x > 1:
            x >>= 1
            n += 1
        return n

    pending = []
    size = 0
    for piece in source:
        pending.append(piece)
        size += len(piece)
        if size < min:
            continue
        if min < max:
            # grow the threshold: at least double it, or jump to the
            # largest power of two not exceeding what we just emitted
            min <<= 1
            candidate = 1 << _log2(size)
            if candidate > min:
                min = candidate
            if min > max:
                min = max
        yield ''.join(pending)
        pending = []
        size = 0
    if pending:
        yield ''.join(pending)
863 864
# convenience alias so util users need not import the error module
Abort = error.Abort
865 866
def always(fn):
    """Predicate that accepts any value."""
    return True
868 869
def never(fn):
    """Predicate that rejects any value."""
    return False
871 872
def nogc(func):
    """disable garbage collector

    Python's garbage collector triggers a GC each time a certain number of
    container objects (the number being defined by gc.get_threshold()) are
    allocated even when marked not to be tracked by the collector. Tracking has
    no effect on when GCs are triggered, only on what objects the GC looks
    into. As a workaround, disable GC while building complex (huge)
    containers.

    This garbage collector issue have been fixed in 2.7.
    """
    if sys.version_info >= (2, 7):
        # nothing to work around on modern interpreters
        return func

    def wrapper(*args, **kwargs):
        wasenabled = gc.isenabled()
        gc.disable()
        try:
            return func(*args, **kwargs)
        finally:
            # only restore if the caller had GC on to begin with
            if wasenabled:
                gc.enable()
    return wrapper
895 896
def pathto(root, n1, n2):
    '''return the relative path from one place to another.
    root should use os.sep to separate directories
    n1 should use os.sep to separate directories
    n2 should use "/" to separate directories
    returns an os.sep-separated path.

    If n1 is a relative path, it's assumed it's
    relative to root.
    n2 should always be relative to root.
    '''
    if not n1:
        return localpath(n2)
    if os.path.isabs(n1):
        # on a different drive there is no relative path; fall back to an
        # absolute one
        if os.path.splitdrive(root)[0] != os.path.splitdrive(n1)[0]:
            return os.path.join(root, localpath(n2))
        n2 = '/'.join((pconvert(root), n2))
    a, b = splitpath(n1), n2.split('/')
    a.reverse()
    b.reverse()
    # strip the common prefix of the two paths
    while a and b and a[-1] == b[-1]:
        a.pop()
        b.pop()
    b.reverse()
    # climb out of what remains of n1, then descend into n2
    return os.sep.join((['..'] * len(a)) + b) or '.'
921 922
def mainfrozen():
    """return True if we are a frozen executable.

    The code supports py2exe (most common, Windows only) and tools/freeze
    (portable, not much used).
    """
    if safehasattr(sys, "frozen"):      # new py2exe
        return True
    if safehasattr(sys, "importers"):   # old py2exe
        return True
    return imp.is_frozen(u"__main__")   # tools/freeze
931 932
# the location of data files matching the source code
if mainfrozen() and getattr(sys, 'frozen', None) != 'macosx_app':
    # executable version (py2exe) doesn't support __file__
    datapath = os.path.dirname(sys.executable)
else:
    datapath = os.path.dirname(__file__)

# let i18n find its message catalogs next to the code/data
i18n.setdatapath(datapath)
940 941
# cached path of the 'hg' executable; resolved lazily by hgexecutable()
_hgexecutable = None

def hgexecutable():
    """return location of the 'hg' executable.

    Defaults to $HG or 'hg' in the search path.
    """
    if _hgexecutable is None:
        hg = os.environ.get('HG')
        mainmod = sys.modules['__main__']
        if hg:
            # explicit override via environment
            _sethgexecutable(hg)
        elif mainfrozen():
            if getattr(sys, 'frozen', None) == 'macosx_app':
                # Env variable set by py2app
                _sethgexecutable(os.environ['EXECUTABLEPATH'])
            else:
                _sethgexecutable(sys.executable)
        elif os.path.basename(getattr(mainmod, '__file__', '')) == 'hg':
            # running directly from an 'hg' script
            _sethgexecutable(mainmod.__file__)
        else:
            exe = findexe('hg') or os.path.basename(sys.argv[0])
            _sethgexecutable(exe)
    return _hgexecutable

def _sethgexecutable(path):
    """set location of the 'hg' executable"""
    global _hgexecutable
    _hgexecutable = path
970 971
971 972 def _isstdout(f):
972 973 fileno = getattr(f, 'fileno', None)
973 974 return fileno and fileno() == sys.__stdout__.fileno()
974 975
def system(cmd, environ=None, cwd=None, onerr=None, errprefix=None, out=None):
    '''enhanced shell command execution.
    run with environment maybe modified, maybe in different dir.

    if command fails and onerr is None, return status, else raise onerr
    object as exception.

    if out is specified, it is assumed to be a file-like object that has a
    write() method. stdout and stderr will be redirected to out.'''
    if environ is None:
        environ = {}
    # make sure anything we already printed appears before child output
    try:
        sys.stdout.flush()
    except Exception:
        pass
    def py2shell(val):
        'convert python object into string that is useful to shell'
        if val is None or val is False:
            return '0'
        if val is True:
            return '1'
        return str(val)
    origcmd = cmd
    cmd = quotecommand(cmd)
    if sys.platform == 'plan9' and (sys.version_info[0] == 2
                                    and sys.version_info[1] < 7):
        # subprocess kludge to work around issues in half-baked Python
        # ports, notably bichued/python:
        if not cwd is None:
            os.chdir(cwd)
        rc = os.system(cmd)
    else:
        # child environment: current environ plus stringified overrides,
        # plus HG pointing back at ourselves
        env = dict(os.environ)
        env.update((k, py2shell(v)) for k, v in environ.iteritems())
        env['HG'] = hgexecutable()
        if out is None or _isstdout(out):
            rc = subprocess.call(cmd, shell=True, close_fds=closefds,
                                 env=env, cwd=cwd)
        else:
            # caller wants output captured: merge stderr into stdout and
            # copy it line by line into 'out'
            proc = subprocess.Popen(cmd, shell=True, close_fds=closefds,
                                    env=env, cwd=cwd, stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            for line in iter(proc.stdout.readline, ''):
                out.write(line)
            proc.wait()
            rc = proc.returncode
    # on OpenVMS an odd status means success
    if sys.platform == 'OpenVMS' and rc & 1:
        rc = 0
    if rc and onerr:
        errmsg = '%s %s' % (os.path.basename(origcmd.split(None, 1)[0]),
                            explainexit(rc)[0])
        if errprefix:
            errmsg = '%s: %s' % (errprefix, errmsg)
        raise onerr(errmsg)
    return rc
1030 1031
def checksignature(func):
    '''wrap a function with code to check for calling errors

    Returns a wrapper that calls func; if the call raises a TypeError
    originating in the call itself (rather than deeper inside func),
    it is converted to error.SignatureError.
    '''
    def check(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except TypeError:
            # a traceback of depth one means func's body never ran: the
            # TypeError came from the argument binding itself
            frames = traceback.extract_tb(sys.exc_info()[2])
            if len(frames) == 1:
                raise error.SignatureError
            raise

    return check
1042 1043
def copyfile(src, dest, hardlink=False, copystat=False, checkambig=False):
    '''copy a file, preserving mode and optionally other stat info like
    atime/mtime

    checkambig argument is used with filestat, and is useful only if
    destination file is guarded by any lock (e.g. repo.lock or
    repo.wlock).

    copystat and checkambig should be exclusive.
    '''
    assert not (copystat and checkambig)
    oldstat = None
    if os.path.lexists(dest):
        if checkambig:
            # remember the destination's stat before it is replaced, so
            # timestamp ambiguity can be detected afterwards
            oldstat = checkambig and filestat(dest)
        unlink(dest)
    # hardlinks are problematic on CIFS, quietly ignore this flag
    # until we find a way to work around it cleanly (issue4546)
    if False and hardlink:
        try:
            oslink(src, dest)
            return
        except (IOError, OSError):
            pass # fall back to normal copy
    if os.path.islink(src):
        os.symlink(os.readlink(src), dest)
        # copytime is ignored for symlinks, but in general copytime isn't needed
        # for them anyway
    else:
        try:
            shutil.copyfile(src, dest)
            if copystat:
                # copystat also copies mode
                shutil.copystat(src, dest)
            else:
                shutil.copymode(src, dest)
            if oldstat and oldstat.stat:
                newstat = filestat(dest)
                if newstat.isambig(oldstat):
                    # stat of copied file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(dest, (advanced, advanced))
        except shutil.Error as inst:
            raise Abort(str(inst))
1087 1088
def copyfiles(src, dst, hardlink=None, progress=lambda t, pos: None):
    """Copy a directory tree using hardlinks if possible.

    Returns a (hardlink, num) pair: whether hardlinking was (still)
    used, and how many files were copied. 'hardlink' defaults to
    "src and dst are on the same device".
    """
    num = 0

    if hardlink is None:
        hardlink = (os.stat(src).st_dev ==
                    os.stat(os.path.dirname(dst)).st_dev)
    if hardlink:
        topic = _('linking')
    else:
        topic = _('copying')

    if os.path.isdir(src):
        os.mkdir(dst)
        for name, kind in osutil.listdir(src):
            srcname = os.path.join(src, name)
            dstname = os.path.join(dst, name)
            # offset the nested call's progress by the files already done
            def nprog(t, pos):
                if pos is not None:
                    return progress(t, pos + num)
            hardlink, n = copyfiles(srcname, dstname, hardlink, progress=nprog)
            num += n
    else:
        if hardlink:
            try:
                oslink(src, dst)
            except (IOError, OSError):
                # once a hardlink fails, stop trying for the rest of the tree
                hardlink = False
                shutil.copy(src, dst)
        else:
            shutil.copy(src, dst)
        num += 1
        progress(topic, num)
    progress(topic, None)

    return hardlink, num
1124 1125
# basenames reserved by Windows (matched case-insensitively below,
# and regardless of extension, e.g. "con.xml")
_winreservednames = '''con prn aux nul
com1 com2 com3 com4 com5 com6 com7 com8 com9
lpt1 lpt2 lpt3 lpt4 lpt5 lpt6 lpt7 lpt8 lpt9'''.split()
# characters that may never appear in a Windows filename
_winreservedchars = ':*?"<>|'
def checkwinfilename(path):
    r'''Check that the base-relative path is a valid filename on Windows.
    Returns None if the path is ok, or a UI string describing the problem.

    >>> checkwinfilename("just/a/normal/path")
    >>> checkwinfilename("foo/bar/con.xml")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/con.xml/bar")
    "filename contains 'con', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/xml.con")
    >>> checkwinfilename("foo/bar/AUX/bla.txt")
    "filename contains 'AUX', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/bla:.txt")
    "filename contains ':', which is reserved on Windows"
    >>> checkwinfilename("foo/bar/b\07la.txt")
    "filename contains '\\x07', which is invalid on Windows"
    >>> checkwinfilename("foo/bar/bla ")
    "filename ends with ' ', which is not allowed on Windows"
    >>> checkwinfilename("../bar")
    >>> checkwinfilename("foo\\")
    "filename ends with '\\', which is invalid on Windows"
    >>> checkwinfilename("foo\\/bar")
    "directory name ends with '\\', which is invalid on Windows"
    '''
    if path.endswith('\\'):
        return _("filename ends with '\\', which is invalid on Windows")
    if '\\/' in path:
        return _("directory name ends with '\\', which is invalid on Windows")
    # examine each path component, accepting either separator
    for n in path.replace('\\', '/').split('/'):
        if not n:
            continue
        for c in n:
            if c in _winreservedchars:
                return _("filename contains '%s', which is reserved "
                         "on Windows") % c
            if ord(c) <= 31:
                return _("filename contains %r, which is invalid "
                         "on Windows") % c
        # the part before the first dot is what Windows reserves
        base = n.split('.')[0]
        if base and base.lower() in _winreservednames:
            return _("filename contains '%s', which is reserved "
                     "on Windows") % base
        t = n[-1]
        # "n not in '..'" is substring containment, so both '.' and '..'
        # (which are substrings of '..') are deliberately allowed through
        if t in '. ' and n not in '..':
            return _("filename ends with '%s', which is not allowed "
                     "on Windows") % t
1175 1176
# on Windows the check above applies to the local filesystem; other
# platforms delegate to their platform module's implementation
if os.name == 'nt':
    checkosfilename = checkwinfilename
else:
    checkosfilename = platform.checkosfilename
1180 1181
def makelock(info, pathname):
    """Record 'info' in a lock file at 'pathname'.

    A symlink whose target is 'info' is preferred since its creation is
    atomic; platforms without os.symlink fall back to an exclusively
    created regular file containing 'info'. An existing lock raises
    OSError (EEXIST).
    """
    try:
        return os.symlink(info, pathname)
    except AttributeError: # no symlink in os
        pass
    except OSError as why:
        # somebody else holds the lock: surface the failure
        if why.errno == errno.EEXIST:
            raise
        # other symlink failures: fall back to the plain-file scheme

    fd = os.open(pathname, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
    os.write(fd, info)
    os.close(fd)
1193 1194
def readlock(pathname):
    """Return the info recorded by makelock() at 'pathname'.

    Reads the symlink target when the lock is a symlink, otherwise the
    contents of the plain lock file.
    """
    try:
        return os.readlink(pathname)
    except AttributeError: # no symlink in os
        pass
    except OSError as why:
        # EINVAL/ENOSYS mean "not a symlink (here)": try the file form
        if why.errno not in (errno.EINVAL, errno.ENOSYS):
            raise
    fp = posixfile(pathname)
    contents = fp.read()
    fp.close()
    return contents
1206 1207
def fstat(fp):
    '''stat file object that may not have fileno method.'''
    fileno = getattr(fp, 'fileno', None)
    if fileno is None:
        # not a real file object: fall back to stat'ing it by name
        return os.stat(fp.name)
    return os.fstat(fileno())
1213 1214
1214 1215 # File system features
1215 1216
def fscasesensitive(path):
    """
    Return true if the given path is on a case-sensitive filesystem

    Requires a path (like /foo/.hg) ending with a foldable final
    directory component.
    """
    s1 = os.lstat(path)
    d, b = os.path.split(path)
    folded = b.upper()
    if folded == b:
        folded = b.lower()
    if folded == b:
        return True # no evidence against case sensitivity
    try:
        s2 = os.lstat(os.path.join(d, folded))
    except OSError:
        # the case-folded name does not exist at all
        return True
    # identical stat for both spellings means they are the same file,
    # i.e. the filesystem folds case
    return s2 != s1
1238 1239
# detect availability of the optional re2 wrapper; _re2 stays None
# ("unknown, probe later") until _re._checkre2 verifies it actually works
try:
    import re2
    _re2 = None
except ImportError:
    _re2 = False
1244 1245
class _re(object):
    """Facade over the 're' module that transparently uses re2 when the
    optional re2 bindings are installed and functional."""

    def _checkre2(self):
        # resolve the tri-state _re2 (None = not yet probed) to a bool
        global _re2
        try:
            # check if match works, see issue3964
            _re2 = bool(re2.match(r'\[([^\[]+)\]', '[ui]'))
        except ImportError:
            _re2 = False

    def compile(self, pat, flags=0):
        '''Compile a regular expression, using re2 if possible

        For best performance, use only re2-compatible regexp features. The
        only flags from the re module that are re2-compatible are
        IGNORECASE and MULTILINE.'''
        if _re2 is None:
            self._checkre2()
        if _re2 and (flags & ~(remod.IGNORECASE | remod.MULTILINE)) == 0:
            # re2 takes its flags inline, as (?i)/(?m) prefixes
            if flags & remod.IGNORECASE:
                pat = '(?i)' + pat
            if flags & remod.MULTILINE:
                pat = '(?m)' + pat
            try:
                return re2.compile(pat)
            except re2.error:
                pass
        # fall back to the stdlib re module
        return remod.compile(pat, flags)

    @propertycache
    def escape(self):
        '''Return the version of escape corresponding to self.compile.

        This is imperfect because whether re2 or re is used for a particular
        function depends on the flags, etc, but it's the best we can do.
        '''
        global _re2
        if _re2 is None:
            self._checkre2()
        if _re2:
            return re2.escape
        else:
            return remod.escape
1287 1288
# module-wide singleton: util.re.compile / util.re.escape
re = _re()

# cache of {directory -> {normcased name: on-disk name}}, used by fspath()
_fspathcache = {}
def fspath(name, root):
    '''Get name in the case stored in the filesystem

    The name should be relative to root, and be normcase-ed for efficiency.

    Note that this function is unnecessary, and should not be
    called, for case-sensitive filesystems (simply because it's expensive).

    The root should be normcase-ed, too.
    '''
    def _makefspathcacheentry(dir):
        # map normcased entry -> on-disk spelling for everything in 'dir'
        return dict((normcase(n), n) for n in os.listdir(dir))

    seps = os.sep
    if os.altsep:
        seps = seps + os.altsep
    # Protect backslashes. This gets silly very quickly.
    # str.replace returns a new string, so the result must be assigned
    # back: without the assignment a '\' separator reached the character
    # classes below unescaped and backslashes in 'name' were treated as
    # part of a path component instead of as separators.
    seps = seps.replace('\\', '\\\\')
    pattern = remod.compile(r'([^%s]+)|([%s]+)' % (seps, seps))
    dir = os.path.normpath(root)
    result = []
    for part, sep in pattern.findall(name):
        if sep:
            # separator runs are passed through unchanged
            result.append(sep)
            continue

        if dir not in _fspathcache:
            _fspathcache[dir] = _makefspathcacheentry(dir)
        contents = _fspathcache[dir]

        found = contents.get(part)
        if not found:
            # retry "once per directory" per "dirstate.walk" which
            # may take place for each patches of "hg qpush", for example
            _fspathcache[dir] = contents = _makefspathcacheentry(dir)
            found = contents.get(part)

        result.append(found or part)
        dir = os.path.join(dir, part)

    return ''.join(result)
1332 1333
def checknlink(testfile):
    '''check whether hardlink count reporting works properly'''

    # testfile may be open, so we need a separate file for checking to
    # work around issue2543 (or testfile may get lost on Samba shares)
    f1 = testfile + ".hgtmp1"
    if os.path.lexists(f1):
        return False
    try:
        posixfile(f1, 'w').close()
    except IOError:
        # couldn't even create the probe file; clean up a partial one
        try:
            os.unlink(f1)
        except OSError:
            pass
        return False

    f2 = testfile + ".hgtmp2"
    fd = None
    try:
        oslink(f1, f2)
        # nlinks() may behave differently for files on Windows shares if
        # the file is open.
        fd = posixfile(f2)
        # a working implementation must report the link we just made
        return nlinks(f2) > 1
    except OSError:
        return False
    finally:
        if fd is not None:
            fd.close()
        for f in (f1, f2):
            try:
                os.unlink(f)
            except OSError:
                pass
1368 1369
def endswithsep(path):
    '''Check path ends with os.sep or os.altsep.

    Always returns a bool; the previous unparenthesized "or" expression
    could evaluate to None on platforms where os.altsep is None.
    '''
    return bool(path.endswith(os.sep) or
                (os.altsep and path.endswith(os.altsep)))
1372 1373
def splitpath(path):
    '''Split path by os.sep.
    Note that this function does not use os.altsep because this is
    an alternative of simple "xxx.split(os.sep)".
    It is recommended to use os.path.normpath() before using this
    function if need.'''
    return path.split(os.sep)
1380 1381
def gui():
    '''Are we running in a GUI?

    Note: the return value is only guaranteed to be truthy/falsy; on
    non-darwin POSIX it may be the DISPLAY string itself rather than a
    bool.
    '''
    if sys.platform == 'darwin':
        if 'SSH_CONNECTION' in os.environ:
            # handle SSH access to a box where the user is logged in
            return False
        elif getattr(osutil, 'isgui', None):
            # check if a CoreGraphics session is available
            return osutil.isgui()
        else:
            # pure build; use a safe default
            return True
    else:
        return os.name == "nt" or os.environ.get("DISPLAY")
1395 1396
def mktempcopy(name, emptyok=False, createmode=None):
    """Create a temporary file with the same contents from name

    The permission bits are copied from the original file.

    If the temporary file is going to be truncated immediately, you
    can use emptyok=True as an optimization.

    Returns the name of the temporary file.
    """
    # create the temp file next to 'name' (same directory)
    d, fn = os.path.split(name)
    fd, temp = tempfile.mkstemp(prefix='.%s-' % fn, dir=d)
    os.close(fd)
    # Temporary files are created with mode 0600, which is usually not
    # what we want. If the original file already exists, just copy
    # its mode. Otherwise, manually obey umask.
    copymode(name, temp, createmode)
    if emptyok:
        return temp
    try:
        try:
            ifp = posixfile(name, "rb")
        except IOError as inst:
            # a missing source simply yields an empty temp file
            if inst.errno == errno.ENOENT:
                return temp
            if not getattr(inst, 'filename', None):
                inst.filename = name
            raise
        ofp = posixfile(temp, "wb")
        for chunk in filechunkiter(ifp):
            ofp.write(chunk)
        ifp.close()
        ofp.close()
    except: # re-raises
        # don't leave a half-written temp file behind
        try: os.unlink(temp)
        except OSError: pass
        raise
    return temp
1434 1435
class filestat(object):
    """help to exactly detect change of a file

    'stat' attribute is result of 'os.stat()' if specified 'path'
    exists. Otherwise, it is None. This can avoid preparative
    'exists()' examination on client side of this class.
    """
    def __init__(self, path):
        try:
            self.stat = os.stat(path)
        except OSError as err:
            if err.errno != errno.ENOENT:
                raise
            # missing file is represented by stat = None
            self.stat = None

    # __eq__ is overridden below, so re-assert identity-based hashing
    __hash__ = object.__hash__

    def __eq__(self, old):
        try:
            # if ambiguity between stat of new and old file is
            # avoided, comparision of size, ctime and mtime is enough
            # to exactly detect change of a file regardless of platform
            return (self.stat.st_size == old.stat.st_size and
                    self.stat.st_ctime == old.stat.st_ctime and
                    self.stat.st_mtime == old.stat.st_mtime)
        except AttributeError:
            # either side's stat is None (file missing): never equal
            return False

    def isambig(self, old):
        """Examine whether new (= self) stat is ambiguous against old one

        "S[N]" below means stat of a file at N-th change:

        - S[n-1].ctime < S[n].ctime: can detect change of a file
        - S[n-1].ctime == S[n].ctime
          - S[n-1].ctime < S[n].mtime: means natural advancing (*1)
          - S[n-1].ctime == S[n].mtime: is ambiguous (*2)
          - S[n-1].ctime > S[n].mtime: never occurs naturally (don't care)
        - S[n-1].ctime > S[n].ctime: never occurs naturally (don't care)

        Case (*2) above means that a file was changed twice or more at
        same time in sec (= S[n-1].ctime), and comparison of timestamp
        is ambiguous.

        Base idea to avoid such ambiguity is "advance mtime 1 sec, if
        timestamp is ambiguous".

        But advancing mtime only in case (*2) doesn't work as
        expected, because naturally advanced S[n].mtime in case (*1)
        might be equal to manually advanced S[n-1 or earlier].mtime.

        Therefore, all "S[n-1].ctime == S[n].ctime" cases should be
        treated as ambiguous regardless of mtime, to avoid overlooking
        by confliction between such mtime.

        Advancing mtime "if isambig(oldstat)" ensures "S[n-1].mtime !=
        S[n].mtime", even if size of a file isn't changed.
        """
        try:
            return (self.stat.st_ctime == old.stat.st_ctime)
        except AttributeError:
            # one of the stats is None: nothing to be ambiguous with
            return False

    def __ne__(self, other):
        return not self == other
1500 1501
class atomictempfile(object):
    '''writable file object that atomically updates a file

    All writes will go to a temporary copy of the original file. Call
    close() when you are done writing, and atomictempfile will rename
    the temporary copy to the original name, making the changes
    visible. If the object is destroyed without being closed, all your
    writes are discarded.

    checkambig argument of constructor is used with filestat, and is
    useful only if target file is guarded by any lock (e.g. repo.lock
    or repo.wlock).
    '''
    def __init__(self, name, mode='w+b', createmode=None, checkambig=False):
        self.__name = name # permanent name
        self._tempname = mktempcopy(name, emptyok=('w' in mode),
                                    createmode=createmode)
        self._fp = posixfile(self._tempname, mode)
        self._checkambig = checkambig

        # delegated methods
        self.read = self._fp.read
        self.write = self._fp.write
        self.seek = self._fp.seek
        self.tell = self._fp.tell
        self.fileno = self._fp.fileno

    def close(self):
        # commit: rename the temp copy over the permanent name
        if not self._fp.closed:
            self._fp.close()
            filename = localpath(self.__name)
            # capture the target's stat before the rename so timestamp
            # ambiguity can be detected (see filestat.isambig)
            oldstat = self._checkambig and filestat(filename)
            if oldstat and oldstat.stat:
                rename(self._tempname, filename)
                newstat = filestat(filename)
                if newstat.isambig(oldstat):
                    # stat of changed file is ambiguous to original one
                    advanced = (oldstat.stat.st_mtime + 1) & 0x7fffffff
                    os.utime(filename, (advanced, advanced))
            else:
                rename(self._tempname, filename)

    def discard(self):
        # abort: drop the temp copy, leaving the original untouched
        if not self._fp.closed:
            try:
                os.unlink(self._tempname)
            except OSError:
                pass
            self._fp.close()

    def __del__(self):
        if safehasattr(self, '_fp'): # constructor actually did something
            self.discard()

    def __enter__(self):
        return self

    def __exit__(self, exctype, excvalue, traceback):
        # commit on clean exit, discard if an exception escaped the block
        if exctype is not None:
            self.discard()
        else:
            self.close()
1563 1564
def makedirs(name, mode=None, notindexed=False):
    """recursive directory creation with parent mode inheritance

    Newly created directories are marked as "not to be indexed by
    the content indexing service", if ``notindexed`` is specified
    for "write" mode access.
    """
    try:
        makedir(name, notindexed)
    except OSError as err:
        if err.errno == errno.EEXIST:
            return
        if err.errno != errno.ENOENT or not name:
            raise
        # missing parent: create it (recursively), then retry
        parent = os.path.dirname(os.path.abspath(name))
        if parent == name:
            raise
        makedirs(parent, mode, notindexed)
        try:
            makedir(name, notindexed)
        except OSError as err:
            # Catch EEXIST to handle races
            if err.errno == errno.EEXIST:
                return
            raise
    if mode is not None:
        os.chmod(name, mode)
1591 1592
def readfile(path):
    """Return the entire contents of the file at path, as bytes."""
    fp = open(path, 'rb')
    try:
        return fp.read()
    finally:
        fp.close()
1595 1596
def writefile(path, text):
    """Replace the contents of the file at path with text (bytes)."""
    fp = open(path, 'wb')
    try:
        fp.write(text)
    finally:
        fp.close()
1599 1600
def appendfile(path, text):
    """Append text (bytes) to the file at path, creating it if needed."""
    fp = open(path, 'ab')
    try:
        fp.write(text)
    finally:
        fp.close()
1603 1604
class chunkbuffer(object):
    """Allow arbitrary sized chunks of data to be efficiently read from an
    iterator over chunks of arbitrary size."""

    def __init__(self, in_iter):
        """in_iter is the iterator that's iterating over the input chunks.
        targetsize is how big a buffer to try to maintain."""
        def splitbig(chunks):
            # cap chunk size at 256k so a single huge chunk can't pin
            # large amounts of memory in the queue
            for chunk in chunks:
                if len(chunk) > 2**20:
                    pos = 0
                    while pos < len(chunk):
                        end = pos + 2 ** 18
                        yield chunk[pos:end]
                        pos = end
                else:
                    yield chunk
        self.iter = splitbig(in_iter)
        self._queue = collections.deque()
        # read offset into the chunk at the head of the queue
        self._chunkoffset = 0

    def read(self, l=None):
        """Read L bytes of data from the iterator of chunks of data.
        Returns less than L bytes if the iterator runs dry.

        If size parameter is omitted, read everything"""
        if l is None:
            return ''.join(self.iter)

        left = l
        buf = []
        queue = self._queue
        while left > 0:
            # refill the queue
            if not queue:
                target = 2**18
                for chunk in self.iter:
                    queue.append(chunk)
                    target -= len(chunk)
                    if target <= 0:
                        break
                if not queue:
                    # source iterator is exhausted: return what we have
                    break

            # The easy way to do this would be to queue.popleft(), modify the
            # chunk (if necessary), then queue.appendleft(). However, for cases
            # where we read partial chunk content, this incurs 2 dequeue
            # mutations and creates a new str for the remaining chunk in the
            # queue. Our code below avoids this overhead.

            chunk = queue[0]
            chunkl = len(chunk)
            offset = self._chunkoffset

            # Use full chunk.
            if offset == 0 and left >= chunkl:
                left -= chunkl
                queue.popleft()
                buf.append(chunk)
                # self._chunkoffset remains at 0.
                continue

            chunkremaining = chunkl - offset

            # Use all of unconsumed part of chunk.
            if left >= chunkremaining:
                left -= chunkremaining
                queue.popleft()
                # offset == 0 is enabled by block above, so this won't merely
                # copy via ``chunk[0:]``.
                buf.append(chunk[offset:])
                self._chunkoffset = 0

            # Partial chunk needed.
            else:
                buf.append(chunk[offset:offset + left])
                self._chunkoffset += left
                left -= chunkremaining

        return ''.join(buf)
1684 1685
def filechunkiter(f, size=65536, limit=None):
    """Create a generator that produces the data in the file size
    (default 65536) bytes at a time, up to optional limit (default is
    to read all data). Chunks may be less than size bytes if the
    chunk is the last chunk in the file, or the file is a socket or
    some other type of file that sometimes reads less data than is
    requested."""
    assert size >= 0
    assert limit is None or limit >= 0
    while True:
        nbytes = size if limit is None else min(limit, size)
        if not nbytes:
            # limit exhausted
            break
        chunk = f.read(nbytes)
        if not chunk:
            # end of file (or socket closed)
            break
        if limit:
            limit -= len(chunk)
        yield chunk
1705 1706
def makedate(timestamp=None):
    '''Return a unix timestamp (or the current time) as a (unixtime,
    offset) tuple based off the local timezone.'''
    if timestamp is None:
        timestamp = time.time()
    if timestamp < 0:
        hint = _("check your clock")
        raise Abort(_("negative timestamp: %d") % timestamp, hint=hint)
    # the difference between the naive UTC and naive local renderings of
    # the same instant is the local UTC offset at that time
    utc = datetime.datetime.utcfromtimestamp(timestamp)
    local = datetime.datetime.fromtimestamp(timestamp)
    diff = utc - local
    tz = diff.days * 86400 + diff.seconds
    return timestamp, tz
1718 1719
def datestr(date=None, format='%a %b %d %H:%M:%S %Y %1%2'):
    """represent a (unixtime, offset) tuple as a localized time.
    unixtime is seconds since the epoch, and offset is the time zone's
    number of seconds away from UTC.

    >>> datestr((0, 0))
    'Thu Jan 01 00:00:00 1970 +0000'
    >>> datestr((42, 0))
    'Thu Jan 01 00:00:42 1970 +0000'
    >>> datestr((-42, 0))
    'Wed Dec 31 23:59:18 1969 +0000'
    >>> datestr((0x7fffffff, 0))
    'Tue Jan 19 03:14:07 2038 +0000'
    >>> datestr((-0x80000000, 0))
    'Fri Dec 13 20:45:52 1901 +0000'
    """
    t, tz = date or makedate()
    if "%1" in format or "%2" in format or "%z" in format:
        # expand the %z/%1/%2 extensions into a numeric [+-]HHMM offset
        sign = "-" if tz > 0 else "+"
        q, r = divmod(abs(tz) // 60, 60)
        format = format.replace("%z", "%1%2")
        format = format.replace("%1", "%c%02d" % (sign, q))
        format = format.replace("%2", "%02d" % r)
    # render the zone-local time, clamped to the signed 32-bit range
    d = min(max(t - tz, -0x80000000), 0x7fffffff)
    # Never use time.gmtime() and datetime.datetime.fromtimestamp()
    # because they use the gmtime() system call which is buggy on Windows
    # for negative values.
    when = datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=d)
    return when.strftime(format)
1754 1755
def shortdate(date=None):
    """turn (timestamp, tzoff) tuple into ISO 8601 date (YYYY-MM-DD)."""
    return datestr(date, format='%Y-%m-%d')
1758 1759
def parsetimezone(s):
    """find a trailing timezone, if any, in string, and return a
    (offset, remainder) pair

    offset is in seconds west-negative/east-positive of the returned
    local string, i.e. it is what must be subtracted from local time to
    get UTC; (None, s) is returned when no zone is recognized."""

    # literal GMT/UTC suffix
    if s.endswith("GMT") or s.endswith("UTC"):
        return 0, s[:-3].rstrip()

    # Unix-style timezones [+-]hhmm
    if len(s) >= 5 and s[-5] in "+-" and s[-4:].isdigit():
        sign = 1 if s[-5] == "+" else -1
        hours, minutes = int(s[-4:-2]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-5].rstrip()

    # ISO8601 trailing Z
    if s.endswith("Z") and s[-2:-1].isdigit():
        return 0, s[:-1]

    # ISO8601-style [+-]hh:mm
    if (len(s) >= 6 and s[-6] in "+-" and s[-3] == ":" and
        s[-5:-3].isdigit() and s[-2:].isdigit()):
        sign = 1 if s[-6] == "+" else -1
        hours, minutes = int(s[-5:-3]), int(s[-2:])
        return -sign * (hours * 60 + minutes) * 60, s[:-6]

    return None, s
1786 1787
def strdate(string, format, defaults=[]):
    """parse a localized time string and return a (unixtime, offset) tuple.
    if the string cannot be parsed, ValueError is raised.

    'defaults' is expected to be the {part: (biased, today)} mapping
    built by parsedate(); it is indexed below, so the [] default is a
    placeholder only — NOTE(review): a mutable default that is never
    mutated here, but also unusable as an actual default.
    """
    # NOTE: unixtime = localunixtime + offset
    offset, date = parsetimezone(string)

    # add missing elements from defaults
    usenow = False # default to using biased defaults
    for part in ("S", "M", "HI", "d", "mb", "yY"): # decreasing specificity
        found = [True for p in part if ("%"+p) in format]
        if not found:
            date += "@" + defaults[part][usenow]
            format += "@%" + part[0]
        else:
            # We've found a specific time element, less specific time
            # elements are relative to today
            usenow = True

    timetuple = time.strptime(date, format)
    localunixtime = int(calendar.timegm(timetuple))
    if offset is None:
        # local timezone
        unixtime = int(time.mktime(timetuple))
        offset = unixtime - localunixtime
    else:
        unixtime = localunixtime + offset
    return unixtime, offset
1814 1815
def parsedate(date, formats=None, bias=None):
    """parse a localized date/time and return a (unixtime, offset) tuple.

    The date may be a "unixtime offset" string or in one of the specified
    formats. If the date already is a (unixtime, offset) tuple, it is returned.

    >>> parsedate(' today ') == parsedate(\
                                  datetime.date.today().strftime('%b %d'))
    True
    >>> parsedate( 'yesterday ') == parsedate((datetime.date.today() -\
                                               datetime.timedelta(days=1)\
                                              ).strftime('%b %d'))
    True
    >>> now, tz = makedate()
    >>> strnow, strtz = parsedate('now')
    >>> (strnow - now) < 1
    True
    >>> tz == strtz
    True
    """
    if bias is None:
        bias = {}
    if not date:
        return 0, 0
    if isinstance(date, tuple) and len(date) == 2:
        return date
    if not formats:
        formats = defaultdateformats
    date = date.strip()

    # symbolic dates; both the English keyword and its translation match
    if date == 'now' or date == _('now'):
        return makedate()
    if date == 'today' or date == _('today'):
        date = datetime.date.today().strftime('%b %d')
    elif date == 'yesterday' or date == _('yesterday'):
        date = (datetime.date.today() -
                datetime.timedelta(days=1)).strftime('%b %d')

    try:
        # "unixtime offset" fast path
        when, offset = map(int, date.split(' '))
    except ValueError:
        # fill out defaults
        now = makedate()
        defaults = {}
        for part in ("d", "mb", "yY", "HI", "M", "S"):
            # this piece is for rounding the specific end of unknowns
            b = bias.get(part)
            if b is None:
                if part[0] in "HMS":
                    b = "00"
                else:
                    b = "0"

            # this piece is for matching the generic end to today's date
            n = datestr(now, "%" + part[0])

            defaults[part] = (b, n)

        # try each candidate format until one parses
        for format in formats:
            try:
                when, offset = strdate(date, format, defaults)
            except (ValueError, OverflowError):
                pass
            else:
                break
        else:
            raise Abort(_('invalid date: %r') % date)
    # validate explicit (probably user-specified) date and
    # time zone offset. values must fit in signed 32 bits for
    # current 32-bit linux runtimes. timezones go from UTC-12
    # to UTC+14
    if when < -0x80000000 or when > 0x7fffffff:
        raise Abort(_('date exceeds 32 bits: %d') % when)
    if offset < -50400 or offset > 43200:
        raise Abort(_('impossible time zone offset: %d') % offset)
    return when, offset
1891 1892
def matchdate(date):
    """Return a function that matches a given date match specifier

    Formats include:

    '{date}' match a given date to the accuracy provided

    '<{date}' on or before a given date

    '>{date}' on or after a given date

    >>> p1 = parsedate("10:29:59")
    >>> p2 = parsedate("10:30:00")
    >>> p3 = parsedate("10:30:59")
    >>> p4 = parsedate("10:31:00")
    >>> p5 = parsedate("Sep 15 10:30:00 1999")
    >>> f = matchdate("10:30")
    >>> f(p1[0])
    False
    >>> f(p2[0])
    True
    >>> f(p3[0])
    True
    >>> f(p4[0])
    False
    >>> f(p5[0])
    False
    """

    def lower(date):
        # bias unspecified month/day fields to their minimum so a partial
        # date resolves to its earliest covered timestamp
        d = {'mb': "1", 'd': "1"}
        return parsedate(date, extendeddateformats, d)[0]

    def upper(date):
        # bias unspecified fields to their maximum; try the longer month
        # lengths first and fall back until parsedate accepts the day
        d = {'mb': "12", 'HI': "23", 'M': "59", 'S': "59"}
        for days in ("31", "30", "29"):
            try:
                d["d"] = days
                return parsedate(date, extendeddateformats, d)[0]
            except Abort:
                pass
        d["d"] = "28"
        return parsedate(date, extendeddateformats, d)[0]

    date = date.strip()

    if not date:
        raise Abort(_("dates cannot consist entirely of whitespace"))
    elif date[0] == "<":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '<DATE'"))
        when = upper(date[1:])
        return lambda x: x <= when
    elif date[0] == ">":
        if not date[1:]:
            raise Abort(_("invalid day spec, use '>DATE'"))
        when = lower(date[1:])
        return lambda x: x >= when
    elif date[0] == "-":
        # '-N': match anything within the last N days
        try:
            days = int(date[1:])
        except ValueError:
            raise Abort(_("invalid day spec: %s") % date[1:])
        if days < 0:
            raise Abort(_("%s must be nonnegative (see 'hg help dates')")
                % date[1:])
        when = makedate()[0] - days * 3600 * 24
        return lambda x: x >= when
    elif " to " in date:
        # 'DATE to DATE': inclusive range between the two bounds
        a, b = date.split(" to ")
        start, stop = lower(a), upper(b)
        return lambda x: x >= start and x <= stop
    else:
        # plain date: match anything within the precision it specifies
        start, stop = lower(date), upper(date)
        return lambda x: x >= start and x <= stop
1967 1968
def stringmatcher(pattern):
    """
    accepts a string, possibly starting with 're:' or 'literal:' prefix.
    returns the matcher name, pattern, and matcher function.
    missing or unknown prefixes are treated as literal matches.

    helper for tests:
    >>> def test(pattern, *tests):
    ...     kind, pattern, matcher = stringmatcher(pattern)
    ...     return (kind, pattern, [bool(matcher(t)) for t in tests])

    exact matching (no prefix):
    >>> test('abcdefg', 'abc', 'def', 'abcdefg')
    ('literal', 'abcdefg', [False, False, True])

    regex matching ('re:' prefix)
    >>> test('re:a.+b', 'nomatch', 'fooadef', 'fooadefbar')
    ('re', 'a.+b', [False, False, True])

    force exact matches ('literal:' prefix)
    >>> test('literal:re:foobar', 'foobar', 're:foobar')
    ('literal', 're:foobar', [False, True])

    unknown prefixes are ignored and treated as literals
    >>> test('foo:bar', 'foo', 'bar', 'foo:bar')
    ('literal', 'foo:bar', [False, False, True])
    """
    if pattern.startswith('re:'):
        regexp = pattern[len('re:'):]
        try:
            compiled = remod.compile(regexp)
        except remod.error as e:
            raise error.ParseError(_('invalid regular expression: %s')
                                   % e)
        return 're', regexp, compiled.search
    if pattern.startswith('literal:'):
        pattern = pattern[len('literal:'):]
    # default (and unknown-prefix) case: exact string equality
    return 'literal', pattern, pattern.__eq__
2006 2007
def shortuser(user):
    """Return a short representation of a user name or email address."""
    # strip the mail domain, then an opening angle bracket, then anything
    # after a space or a dot -- so 'First Last <first.last@x>' -> 'first'
    cut = user.find('@')
    if cut >= 0:
        user = user[:cut]
    cut = user.find('<')
    if cut >= 0:
        user = user[cut + 1:]
    for sep in (' ', '.'):
        cut = user.find(sep)
        if cut >= 0:
            user = user[:cut]
    return user
2022 2023
def emailuser(user):
    """Return the user portion of an email address."""
    # drop everything from the '@' on, then anything up to and including
    # the first '<' (handles 'Name <user@host>' forms); when '<' is
    # absent, find() returns -1 and the slice is a no-op
    user = user.partition('@')[0]
    return user[user.find('<') + 1:]
2032 2033
def email(author):
    '''get email of author.'''
    # the address is whatever sits between '<' and '>'; degrade gracefully
    # when either bracket is absent
    start = author.find('<') + 1
    end = author.find('>')
    if end == -1:
        return author[start:]
    return author[start:end]
2039 2040
def ellipsis(text, maxlength=400):
    """Trim string to at most maxlength (default: 400) columns in display."""
    # encoding.trim measures display columns rather than bytes; presumably
    # it appends the '...' marker only when it truncates -- confirm there
    return encoding.trim(text, maxlength, ellipsis='...')
2043 2044
def unitcountfn(*unittable):
    '''return a function that renders a readable count of some quantity'''

    def go(count):
        # use the first unit whose threshold the count reaches; the final
        # table entry acts as the unconditional fallback
        for multiplier, divisor, fmt in unittable:
            if count >= divisor * multiplier:
                return fmt % (count / float(divisor))
        return unittable[-1][2] % count

    return go
2054 2055
# render a byte count using the largest unit that keeps three
# significant digits (100 -> %.0f, 10 -> %.1f, 1 -> %.2f)
bytecount = unitcountfn(
    (100, 1 << 30, _('%.0f GB')),
    (10, 1 << 30, _('%.1f GB')),
    (1, 1 << 30, _('%.2f GB')),
    (100, 1 << 20, _('%.0f MB')),
    (10, 1 << 20, _('%.1f MB')),
    (1, 1 << 20, _('%.2f MB')),
    (100, 1 << 10, _('%.0f KB')),
    (10, 1 << 10, _('%.1f KB')),
    (1, 1 << 10, _('%.2f KB')),
    (1, 1, _('%.0f bytes')),
    )
2067 2068
def uirepr(s):
    """Return repr(s) with doubled backslashes collapsed.

    Avoids the double backslash repr() produces for Windows paths.
    """
    quoted = repr(s)
    return quoted.replace('\\\\', '\\')
2071 2072
# delay import of textwrap
def MBTextWrapper(**kwargs):
    class tw(textwrap.TextWrapper):
        """
        Extend TextWrapper for width-awareness.

        Neither number of 'bytes' in any encoding nor 'characters' is
        appropriate to calculate terminal columns for specified string.

        Original TextWrapper implementation uses built-in 'len()' directly,
        so overriding is needed to use width information of each characters.

        In addition, characters classified into 'ambiguous' width are
        treated as wide in East Asian area, but as narrow in other.

        This requires use decision to determine width of such characters.
        """
        def _cutdown(self, ucstr, space_left):
            # split ucstr at the last character that still fits in
            # space_left display columns (ucolwidth is column-aware)
            l = 0
            colwidth = encoding.ucolwidth
            for i in xrange(len(ucstr)):
                l += colwidth(ucstr[i])
                if space_left < l:
                    return (ucstr[:i], ucstr[i:])
            return ucstr, ''

        # overriding of base class
        def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
            space_left = max(width - cur_len, 1)

            if self.break_long_words:
                cut, res = self._cutdown(reversed_chunks[-1], space_left)
                cur_line.append(cut)
                reversed_chunks[-1] = res
            elif not cur_line:
                # cannot break the word: put it on a line of its own
                cur_line.append(reversed_chunks.pop())

        # this overriding code is imported from TextWrapper of Python 2.6
        # to calculate columns of string by 'encoding.ucolwidth()'
        def _wrap_chunks(self, chunks):
            colwidth = encoding.ucolwidth

            lines = []
            if self.width <= 0:
                raise ValueError("invalid width %r (must be > 0)" % self.width)

            # Arrange in reverse order so items can be efficiently popped
            # from a stack of chucks.
            chunks.reverse()

            while chunks:

                # Start the list of chunks that will make up the current line.
                # cur_len is just the length of all the chunks in cur_line.
                cur_line = []
                cur_len = 0

                # Figure out which static string will prefix this line.
                if lines:
                    indent = self.subsequent_indent
                else:
                    indent = self.initial_indent

                # Maximum width for this line.
                width = self.width - len(indent)

                # First chunk on line is whitespace -- drop it, unless this
                # is the very beginning of the text (i.e. no lines started yet).
                if self.drop_whitespace and chunks[-1].strip() == '' and lines:
                    del chunks[-1]

                while chunks:
                    l = colwidth(chunks[-1])

                    # Can at least squeeze this chunk onto the current line.
                    if cur_len + l <= width:
                        cur_line.append(chunks.pop())
                        cur_len += l

                    # Nope, this line is full.
                    else:
                        break

                # The current line is full, and the next chunk is too big to
                # fit on *any* line (not just this one).
                if chunks and colwidth(chunks[-1]) > width:
                    self._handle_long_word(chunks, cur_line, cur_len, width)

                # If the last chunk on this line is all whitespace, drop it.
                if (self.drop_whitespace and
                    cur_line and cur_line[-1].strip() == ''):
                    del cur_line[-1]

                # Convert current line back to a string and store it in list
                # of all lines (return value).
                if cur_line:
                    lines.append(indent + ''.join(cur_line))

            return lines

    # rebind the module-level name to the class itself, so textwrap is
    # imported and the class built only on first use
    global MBTextWrapper
    MBTextWrapper = tw
    return tw(**kwargs)
2175 2176
def wrap(line, width, initindent='', hangindent=''):
    """Wrap line to width display columns.

    initindent prefixes the first output line, hangindent every later
    one.  Inputs are byte strings in the local encoding; the result is
    re-encoded the same way.
    """
    maxindent = max(len(hangindent), len(initindent))
    if width <= maxindent:
        # adjust for weird terminal size
        width = max(78, maxindent + 1)
    # work in unicode so the wrapper can measure display columns
    line = line.decode(encoding.encoding, encoding.encodingmode)
    initindent = initindent.decode(encoding.encoding, encoding.encodingmode)
    hangindent = hangindent.decode(encoding.encoding, encoding.encodingmode)
    wrapper = MBTextWrapper(width=width,
                            initial_indent=initindent,
                            subsequent_indent=hangindent)
    return wrapper.fill(line).encode(encoding.encoding)
2188 2189
def iterlines(iterator):
    """Yield the individual lines of every chunk produced by iterator."""
    for block in iterator:
        for text in block.splitlines():
            yield text
2193 2194
def expandpath(path):
    """Expand environment variables and a leading ~user in path."""
    withvars = os.path.expandvars(path)
    return os.path.expanduser(withvars)
2196 2197
def hgcmd():
    """Return the command used to execute current hg

    This is different from hgexecutable() because on Windows we want
    to avoid things opening new shell windows like batch files, so we
    get either the python call or current executable.
    """
    if mainfrozen():
        if getattr(sys, 'frozen', None) == 'macosx_app':
            # Env variable set by py2app
            return [os.environ['EXECUTABLEPATH']]
        else:
            return [sys.executable]
    # not a frozen binary: defer to the platform-specific helper
    return gethgcmd()
2211 2212
def rundetached(args, condfn):
    """Execute the argument list in a detached process.

    condfn is a callable which is called repeatedly and should return
    True once the child process is known to have started successfully.
    At this point, the child process PID is returned. If the child
    process fails to start or finishes before condfn() evaluates to
    True, return -1.
    """
    # Windows case is easier because the child process is either
    # successfully starting and validating the condition or exiting
    # on failure. We just poll on its PID. On Unix, if the child
    # process fails to start, it will be left in a zombie state until
    # the parent wait on it, which we cannot do since we expect a long
    # running process on success. Instead we listen for SIGCHLD telling
    # us our child process terminated.
    terminated = set()
    def handler(signum, frame):
        # os.wait() yields a (pid, status) pair for the reaped child
        terminated.add(os.wait())
    prevhandler = None
    SIGCHLD = getattr(signal, 'SIGCHLD', None)
    if SIGCHLD is not None:
        prevhandler = signal.signal(SIGCHLD, handler)
    try:
        pid = spawndetached(args)
        while not condfn():
            # the second condfn() call guards against the race where the
            # child succeeded and then exited before we looked at it
            if ((pid in terminated or not testpid(pid))
                and not condfn()):
                return -1
            time.sleep(0.1)
        return pid
    finally:
        # restore the previous SIGCHLD disposition
        if prevhandler is not None:
            signal.signal(signal.SIGCHLD, prevhandler)
2246 2247
def interpolate(prefix, mapping, s, fn=None, escape_prefix=False):
    """Return the result of interpolating items in the mapping into string s.

    prefix is a single character string, or a two character string with
    a backslash as the first character if the prefix needs to be escaped in
    a regular expression.

    fn is an optional function that will be applied to the replacement text
    just before replacement.

    escape_prefix is an optional flag that allows using doubled prefix for
    its escaping.
    """
    fn = fn or (lambda text: text)
    patterns = '|'.join(mapping.keys())
    if escape_prefix:
        # a doubled prefix escapes itself: map the bare prefix character
        # to itself so '%%' (say) renders as '%'
        patterns += '|' + prefix
        prefix_char = prefix[1:] if len(prefix) > 1 else prefix
        mapping[prefix_char] = prefix_char
    matcher = remod.compile(r'%s(%s)' % (prefix, patterns))
    # the match text starts with the single prefix character; strip it
    # to recover the mapping key
    return matcher.sub(lambda m: fn(mapping[m.group()[1:]]), s)
2271 2272
def getport(port):
    """Return the port for a given network service.

    If port is an integer, it's returned as is. If it's a string, it's
    looked up using socket.getservbyname(). If there's no matching
    service, error.Abort is raised.
    """
    try:
        return int(port)
    except ValueError:
        # not numeric: treat it as a service name
        try:
            return socket.getservbyname(port)
        except socket.error:
            raise Abort(_("no port number associated with service '%s'")
                        % port)
2288 2289
# recognized spellings of true and false for config values
_booleans = dict.fromkeys(('1', 'yes', 'true', 'on', 'always'), True)
_booleans.update(dict.fromkeys(('0', 'no', 'false', 'off', 'never'), False))

def parsebool(s):
    """Parse s into a boolean.

    If s is not a valid boolean, returns None.
    """
    return _booleans.get(s.lower())
2299 2300
2300 _hexdig = '0123456789ABCDEFabcdef'
2301 2301 _hextochr = dict((a + b, chr(int(a + b, 16)))
2302 for a in _hexdig for b in _hexdig)
2302 for a in string.hexdigits for b in string.hexdigits)
2303 2303
def _urlunquote(s):
    """Decode HTTP/HTML % encoding.

    >>> _urlunquote('abc%20def')
    'abc def'
    """
    res = s.split('%')
    # fastpath
    if len(res) == 1:
        return s
    s = res[0]
    for item in res[1:]:
        try:
            # the two characters after '%' are a hex escape
            s += _hextochr[item[:2]] + item[2:]
        except KeyError:
            # not a valid escape: keep the '%' literally
            s += '%' + item
        except UnicodeDecodeError:
            # py2: appending the decoded byte to a unicode string can
            # fail; fall back to decoding the escape as a code point
            s += unichr(int(item[:2], 16)) + item[2:]
    return s
2323 2323
class url(object):
    r"""Reliable URL parser.

    This parses URLs and provides attributes for the following
    components:

    <scheme>://<user>:<passwd>@<host>:<port>/<path>?<query>#<fragment>

    Missing components are set to None. The only exception is
    fragment, which is set to '' if present but empty.

    If parsefragment is False, fragment is included in query. If
    parsequery is False, query is included in path. If both are
    False, both fragment and query are included in path.

    See http://www.ietf.org/rfc/rfc2396.txt for more information.

    Note that for backward compatibility reasons, bundle URLs do not
    take host names. That means 'bundle://../' has a path of '../'.

    Examples:

    >>> url('http://www.ietf.org/rfc/rfc2396.txt')
    <url scheme: 'http', host: 'www.ietf.org', path: 'rfc/rfc2396.txt'>
    >>> url('ssh://[::1]:2200//home/joe/repo')
    <url scheme: 'ssh', host: '[::1]', port: '2200', path: '/home/joe/repo'>
    >>> url('file:///home/joe/repo')
    <url scheme: 'file', path: '/home/joe/repo'>
    >>> url('file:///c:/temp/foo/')
    <url scheme: 'file', path: 'c:/temp/foo/'>
    >>> url('bundle:foo')
    <url scheme: 'bundle', path: 'foo'>
    >>> url('bundle://../foo')
    <url scheme: 'bundle', path: '../foo'>
    >>> url(r'c:\foo\bar')
    <url path: 'c:\\foo\\bar'>
    >>> url(r'\\blah\blah\blah')
    <url path: '\\\\blah\\blah\\blah'>
    >>> url(r'\\blah\blah\blah#baz')
    <url path: '\\\\blah\\blah\\blah', fragment: 'baz'>
    >>> url(r'file:///C:\users\me')
    <url scheme: 'file', path: 'C:\\users\\me'>

    Authentication credentials:

    >>> url('ssh://joe:xyz@x/repo')
    <url scheme: 'ssh', user: 'joe', passwd: 'xyz', host: 'x', path: 'repo'>
    >>> url('ssh://joe@x/repo')
    <url scheme: 'ssh', user: 'joe', host: 'x', path: 'repo'>

    Query strings and fragments:

    >>> url('http://host/a?b#c')
    <url scheme: 'http', host: 'host', path: 'a', query: 'b', fragment: 'c'>
    >>> url('http://host/a?b#c', parsequery=False, parsefragment=False)
    <url scheme: 'http', host: 'host', path: 'a?b#c'>

    Empty path:

    >>> url('')
    <url path: ''>
    >>> url('#a')
    <url path: '', fragment: 'a'>
    >>> url('http://host/')
    <url scheme: 'http', host: 'host', path: ''>
    >>> url('http://host/#a')
    <url scheme: 'http', host: 'host', path: '', fragment: 'a'>

    Only scheme:

    >>> url('http:')
    <url scheme: 'http'>
    """

    # characters left unescaped when %-quoting user/password components
    _safechars = "!~*'()+"
    # characters left unescaped when %-quoting path/fragment components
    _safepchars = "/!~*'()+:\\"
    # recognizes a leading 'scheme:' prefix (RFC 2396 scheme characters)
    _matchscheme = remod.compile(r'^[a-zA-Z0-9+.\-]+:').match

    def __init__(self, path, parsequery=True, parsefragment=True):
        # We slowly chomp away at path until we have only the path left
        self.scheme = self.user = self.passwd = self.host = None
        self.port = self.path = self.query = self.fragment = None
        self._localpath = True
        self._hostport = ''
        self._origpath = path

        if parsefragment and '#' in path:
            path, self.fragment = path.split('#', 1)

        # special case for Windows drive letters and UNC paths
        if hasdriveletter(path) or path.startswith(r'\\'):
            self.path = path
            return

        # For compatibility reasons, we can't handle bundle paths as
        # normal URLS
        if path.startswith('bundle:'):
            self.scheme = 'bundle'
            path = path[7:]
            if path.startswith('//'):
                path = path[2:]
            self.path = path
            return

        # split off 'scheme:'; an empty scheme (':foo') leaves the input
        # treated as a local path
        if self._matchscheme(path):
            parts = path.split(':', 1)
            if parts[0]:
                self.scheme, path = parts
                self._localpath = False

        if not path:
            path = None
            if self._localpath:
                self.path = ''
                return
        else:
            if self._localpath:
                self.path = path
                return

        if parsequery and '?' in path:
            path, self.query = path.split('?', 1)
            if not path:
                path = None
            if not self.query:
                self.query = None

        # // is required to specify a host/authority
        if path and path.startswith('//'):
            parts = path[2:].split('/', 1)
            if len(parts) > 1:
                self.host, path = parts
            else:
                self.host = parts[0]
                path = None
            if not self.host:
                self.host = None
                # path of file:///d is /d
                # path of file:///d:/ is d:/, not /d:/
                if path and not hasdriveletter(path):
                    path = '/' + path

        if self.host and '@' in self.host:
            self.user, self.host = self.host.rsplit('@', 1)
            if ':' in self.user:
                self.user, self.passwd = self.user.split(':', 1)
            if not self.host:
                self.host = None

        # Don't split on colons in IPv6 addresses without ports
        if (self.host and ':' in self.host and
            not (self.host.startswith('[') and self.host.endswith(']'))):
            self._hostport = self.host
            self.host, self.port = self.host.rsplit(':', 1)
            if not self.host:
                self.host = None

        if (self.host and self.scheme == 'file' and
            self.host not in ('localhost', '127.0.0.1', '[::1]')):
            raise Abort(_('file:// URLs can only refer to localhost'))

        self.path = path

        # leave the query string escaped
        for a in ('user', 'passwd', 'host', 'port',
                  'path', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                setattr(self, a, _urlunquote(v))

    def __repr__(self):
        """Return a debug representation listing the non-None components."""
        attrs = []
        for a in ('scheme', 'user', 'passwd', 'host', 'port', 'path',
                  'query', 'fragment'):
            v = getattr(self, a)
            if v is not None:
                attrs.append('%s: %r' % (a, v))
        return '<url %s>' % ', '.join(attrs)

    def __str__(self):
        r"""Join the URL's components back into a URL string.

        Examples:

        >>> str(url('http://user:pw@host:80/c:/bob?fo:oo#ba:ar'))
        'http://user:pw@host:80/c:/bob?fo:oo#ba:ar'
        >>> str(url('http://user:pw@host:80/?foo=bar&baz=42'))
        'http://user:pw@host:80/?foo=bar&baz=42'
        >>> str(url('http://user:pw@host:80/?foo=bar%3dbaz'))
        'http://user:pw@host:80/?foo=bar%3dbaz'
        >>> str(url('ssh://user:pw@[::1]:2200//home/joe#'))
        'ssh://user:pw@[::1]:2200//home/joe#'
        >>> str(url('http://localhost:80//'))
        'http://localhost:80//'
        >>> str(url('http://localhost:80/'))
        'http://localhost:80/'
        >>> str(url('http://localhost:80'))
        'http://localhost:80/'
        >>> str(url('bundle:foo'))
        'bundle:foo'
        >>> str(url('bundle://../foo'))
        'bundle:../foo'
        >>> str(url('path'))
        'path'
        >>> str(url('file:///tmp/foo/bar'))
        'file:///tmp/foo/bar'
        >>> str(url('file:///c:/tmp/foo/bar'))
        'file:///c:/tmp/foo/bar'
        >>> print url(r'bundle:foo\bar')
        bundle:foo\bar
        >>> print url(r'file:///D:\data\hg')
        file:///D:\data\hg
        """
        if self._localpath:
            s = self.path
            if self.scheme == 'bundle':
                s = 'bundle:' + s
            if self.fragment:
                s += '#' + self.fragment
            return s

        s = self.scheme + ':'
        if self.user or self.passwd or self.host:
            s += '//'
        elif self.scheme and (not self.path or self.path.startswith('/')
                              or hasdriveletter(self.path)):
            s += '//'
            if hasdriveletter(self.path):
                s += '/'
        if self.user:
            s += urlreq.quote(self.user, safe=self._safechars)
        if self.passwd:
            s += ':' + urlreq.quote(self.passwd, safe=self._safechars)
        if self.user or self.passwd:
            s += '@'
        if self.host:
            # bracketed IPv6 literals are emitted verbatim
            if not (self.host.startswith('[') and self.host.endswith(']')):
                s += urlreq.quote(self.host)
            else:
                s += self.host
        if self.port:
            s += ':' + urlreq.quote(self.port)
        if self.host:
            s += '/'
        if self.path:
            # TODO: similar to the query string, we should not unescape the
            # path when we store it, the path might contain '%2f' = '/',
            # which we should *not* escape.
            s += urlreq.quote(self.path, safe=self._safepchars)
        if self.query:
            # we store the query in escaped form.
            s += '?' + self.query
        if self.fragment is not None:
            s += '#' + urlreq.quote(self.fragment, safe=self._safepchars)
        return s

    def authinfo(self):
        """Return (url_without_credentials, auth_tuple_or_None)."""
        user, passwd = self.user, self.passwd
        try:
            # serialize with the credentials temporarily removed
            self.user, self.passwd = None, None
            s = str(self)
        finally:
            self.user, self.passwd = user, passwd
        if not self.user:
            return (s, None)
        # authinfo[1] is passed to urllib2 password manager, and its
        # URIs must not contain credentials. The host is passed in the
        # URIs list because Python < 2.4.3 uses only that to search for
        # a password.
        return (s, (None, (s, self.host),
                    self.user, self.passwd or ''))

    def isabs(self):
        if self.scheme and self.scheme != 'file':
            return True # remote URL
        if hasdriveletter(self.path):
            return True # absolute for our purposes - can't be joined()
        if self.path.startswith(r'\\'):
            return True # Windows UNC path
        if self.path.startswith('/'):
            return True # POSIX-style
        return False

    def localpath(self):
        """Return the filesystem path for file:/bundle: URLs."""
        if self.scheme == 'file' or self.scheme == 'bundle':
            path = self.path or '/'
            # For Windows, we need to promote hosts containing drive
            # letters to paths with drive letters.
            if hasdriveletter(self._hostport):
                path = self._hostport + '/' + self.path
            elif (self.host is not None and self.path
                  and not hasdriveletter(path)):
                path = '/' + path
            return path
        return self._origpath

    def islocal(self):
        '''whether localpath will return something that posixfile can open'''
        return (not self.scheme or self.scheme == 'file'
                or self.scheme == 'bundle')
2624 2624
def hasscheme(path):
    """Report whether path carries a URL scheme prefix such as 'http:'."""
    return bool(url(path).scheme)
2627 2627
def hasdriveletter(path):
    """Report whether path begins with a Windows drive letter like 'c:'."""
    # empty/None input short-circuits to the (falsy) input itself,
    # mirroring the original 'path and ...' expression
    if not path:
        return path
    return path[0:1].isalpha() and path[1:2] == ':'
2630 2630
def urllocalpath(path):
    """Return the local filesystem path for path.

    '?' and '#' are kept as literal path characters rather than being
    parsed as query/fragment separators.
    """
    return url(path, parsequery=False, parsefragment=False).localpath()
2633 2633
def hidepassword(u):
    '''hide user credential in a url string'''
    u = url(u)
    if u.passwd:
        # keep the URL shape but mask the secret before reserializing
        u.passwd = '***'
    return str(u)
2640 2640
def removeauth(u):
    '''remove all authentication information from a url string'''
    u = url(u)
    # drop both user and password before reserializing
    u.user = u.passwd = None
    return str(u)
2646 2646
def isatty(fp):
    """Return whether fp is attached to a tty.

    Objects lacking an isatty() method are reported as not a tty.
    """
    try:
        return fp.isatty()
    except AttributeError:
        # file-like objects with no tty concept
        return False
2652 2652
# render an elapsed time (in seconds) with the largest unit (s/ms/us/ns)
# that keeps three significant digits
timecount = unitcountfn(
    (1, 1e3, _('%.0f s')),
    (100, 1, _('%.1f s')),
    (10, 1, _('%.2f s')),
    (1, 1, _('%.3f s')),
    (100, 0.001, _('%.1f ms')),
    (10, 0.001, _('%.2f ms')),
    (1, 0.001, _('%.3f ms')),
    (100, 0.000001, _('%.1f us')),
    (10, 0.000001, _('%.2f us')),
    (1, 0.000001, _('%.3f us')),
    (100, 0.000000001, _('%.1f ns')),
    (10, 0.000000001, _('%.2f ns')),
    (1, 0.000000001, _('%.3f ns')),
    )
2668 2668
# current indentation depth of nested @timed reports (list so the
# closure below can mutate it)
_timenesting = [0]

def timed(func):
    '''Report the execution time of a function call to stderr.

    During development, use as a decorator when you need to measure
    the cost of a function, e.g. as follows:

    @util.timed
    def foo(a, b, c):
        pass
    '''

    def wrapper(*args, **kwargs):
        start = time.time()
        indent = 2
        # deepen the indent for any timed calls made by func
        _timenesting[0] += indent
        try:
            return func(*args, **kwargs)
        finally:
            # report even when func raises
            elapsed = time.time() - start
            _timenesting[0] -= indent
            sys.stderr.write('%s%s: %s\n' %
                             (' ' * _timenesting[0], func.__name__,
                              timecount(elapsed)))
    return wrapper
2695 2695
# recognized size suffixes with their byte multipliers; single letters are
# listed first but only match when the spec actually ends in that letter
_sizeunits = (('m', 2**20), ('k', 2**10), ('g', 2**30),
              ('kb', 2**10), ('mb', 2**20), ('gb', 2**30), ('b', 1))

def sizetoint(s):
    '''Convert a space specifier to a byte count.

    >>> sizetoint('30')
    30
    >>> sizetoint('2.2kb')
    2252
    >>> sizetoint('6M')
    6291456
    '''
    spec = s.strip().lower()
    try:
        for suffix, multiplier in _sizeunits:
            if spec.endswith(suffix):
                number = spec[:-len(suffix)]
                return int(float(number) * multiplier)
        # no suffix: a plain byte count
        return int(spec)
    except ValueError:
        raise error.ParseError(_("couldn't parse size: %s") % s)
2717 2717
class hooks(object):
    '''A collection of hook functions that can be used to extend a
    function's behavior. Hooks are called in lexicographic order,
    based on the names of their sources.'''

    def __init__(self):
        # list of (source-name, callable) pairs
        self._hooks = []

    def add(self, source, hook):
        self._hooks.append((source, hook))

    def __call__(self, *args):
        # sort on every call so late registrations still land in order
        self._hooks.sort(key=lambda entry: entry[0])
        return [fn(*args) for _source, fn in self._hooks]
2735 2735
def getstackframes(skip=0, line=' %-*s in %s\n', fileline='%s:%s'):
    '''Yields lines for a nicely formatted stacktrace.
    Skips the 'skip' last entries.
    Each file+linenumber is formatted according to fileline.
    Each line is formatted according to line.
    If line is None, it yields:
      length of longest filepath+line number,
      filepath+linenumber,
      function

    Not be used in production code but very convenient while developing.
    '''
    # extract_stack() yields (filename, lineno, funcname, text) tuples;
    # drop this helper's own frame plus the 'skip' innermost callers
    entries = [(fileline % (fn, ln), func)
        for fn, ln, func, _text in traceback.extract_stack()[:-skip - 1]]
    if entries:
        # pad every location to the widest so the columns line up
        fnmax = max(len(entry[0]) for entry in entries)
        for fnln, func in entries:
            if line is None:
                yield (fnmax, fnln, func)
            else:
                yield line % (fnmax, fnln, func)
2757 2757
def debugstacktrace(msg='stacktrace', skip=0, f=sys.stderr, otherf=sys.stdout):
    '''Writes a message to f (stderr) with a nicely formatted stacktrace.
    Skips the 'skip' last entries. By default it will flush stdout first.
    It can be used everywhere and intentionally does not require an ui object.
    Not be used in production code but very convenient while developing.
    '''
    if otherf:
        # flush the companion stream first so interleaved output stays ordered
        otherf.flush()
    f.write('%s at:\n' % msg)
    # skip + 1: also hide this helper's own frame
    for line in getstackframes(skip + 1):
        f.write(line)
    f.flush()
2770 2770
class dirs(object):
    '''a multiset of directory names from a dirstate or manifest'''

    def __init__(self, map, skip=None):
        # directory name -> number of tracked entries beneath it
        self._dirs = {}
        addpath = self.addpath
        if safehasattr(map, 'iteritems') and skip is not None:
            # dirstate-style mapping of file -> state tuple; entries whose
            # state character equals 'skip' are left out
            for f, s in map.iteritems():
                if s[0] != skip:
                    addpath(f)
        else:
            # plain iterable of file names (e.g. a manifest)
            for f in map:
                addpath(f)

    def addpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if base in dirs:
                # this ancestor (and thus all of its own ancestors) is
                # already counted; bump it and stop
                dirs[base] += 1
                return
            dirs[base] = 1

    def delpath(self, path):
        dirs = self._dirs
        for base in finddirs(path):
            if dirs[base] > 1:
                # still referenced by other entries; decrement and stop
                dirs[base] -= 1
                return
            del dirs[base]

    def __iter__(self):
        # py2 iterator over the directory names
        return self._dirs.iterkeys()

    def __contains__(self, d):
        return d in self._dirs
2806 2806
# Prefer the C implementation of 'dirs' when the parsers module provides
# one; it is substantially faster than the pure-Python class above.
if safehasattr(parsers, 'dirs'):
    dirs = parsers.dirs
2809 2809
def finddirs(path):
    '''Yield each ancestor directory of *path*, deepest first.

    Uses '/'-separated paths; yields nothing for a bare filename.
    '''
    sep = path.rfind('/')
    while sep != -1:
        yield path[:sep]
        sep = path.rfind('/', 0, sep)
2815 2815
2816 2816 # compression utility
2817 2817
class nocompress(object):
    """Pass-through 'compressor' used when no compression is requested."""

    def compress(self, x):
        # Identity: hand the chunk back unchanged.
        return x

    def flush(self):
        # Nothing is ever buffered, so there is no trailing data.
        return ""
2823 2823
# Compression engines, keyed by the two-letter bundle header.  Values are
# zero-argument factories so the bz2/zlib objects are created lazily.
compressors = {None: nocompress}
compressors['BZ'] = lambda: bz2.BZ2Compressor()
compressors['GZ'] = lambda: zlib.compressobj()
# 'UN' (uncompressed) is a historical alias for None, kept by courtesy.
compressors['UN'] = compressors[None]
2832 2832
def _makedecompressor(decompcls):
    """Return a function mapping a file-like object to a chunkbuffer of
    decompressed data.

    decompcls is a zero-argument factory producing an object with a
    decompress(data) method (e.g. zlib.decompressobj).  The decompressor
    is only instantiated once iteration actually starts.
    """
    def _decompress(fh):
        def _chunks():
            # created lazily, on first iteration of the generator
            engine = decompcls()
            for chunk in filechunkiter(fh):
                yield engine.decompress(chunk)
        return chunkbuffer(_chunks())
    return _decompress
2841 2841
class ctxmanager(object):
    '''A context manager for use in 'with' blocks to allow multiple
    contexts to be entered at once.  This is both safer and more
    flexible than contextlib.nested.

    Once Mercurial supports Python 2.7+, this will become mostly
    unnecessary.
    '''

    def __init__(self, *args):
        '''Accepts a list of no-argument functions that return context
        managers.  These will be invoked at enter() time.'''
        self._pending = args
        # __exit__ callables of entered managers, plus atexit wrappers,
        # in creation order; run in reverse on exit
        self._atexit = []

    def __enter__(self):
        return self

    def enter(self):
        '''Create and enter context managers in the order in which they
        were passed to the constructor.  Returns the list of their
        __enter__ results.'''
        values = []
        for func in self._pending:
            obj = func()
            values.append(obj.__enter__())
            self._atexit.append(obj.__exit__)
        del self._pending
        return values

    def atexit(self, func, *args, **kwargs):
        '''Add a function to call when this context manager exits.  The
        ordering of multiple atexit calls is unspecified, save that
        they will happen before any __exit__ functions.'''
        def wrapper(exc_type, exc_val, exc_tb):
            func(*args, **kwargs)
        self._atexit.append(wrapper)
        return func

    def __exit__(self, exc_type, exc_val, exc_tb):
        '''Context managers are exited in the reverse order from which
        they were created.  Returns True iff an incoming exception was
        suppressed by one of the managers.'''
        received = exc_type is not None
        suppressed = False
        pending = None
        self._atexit.reverse()
        for exitfunc in self._atexit:
            try:
                if exitfunc(exc_type, exc_val, exc_tb):
                    # this manager suppressed the exception; later
                    # managers see a clean exit
                    suppressed = True
                    exc_type = None
                    exc_val = None
                    exc_tb = None
            except BaseException:
                # remember the most recent failure and pass it to the
                # remaining exit functions (fix: a redundant duplicate
                # 'pending = sys.exc_info()' assignment was removed here)
                exc_type, exc_val, exc_tb = pending = sys.exc_info()
        del self._atexit
        if pending:
            raise exc_val
        return received and suppressed
2901 2901
2902 2902 def _bz2():
2903 2903 d = bz2.BZ2Decompressor()
2904 2904 # Bzip2 stream start with BZ, but we stripped it.
2905 2905 # we put it back for good measure.
2906 2906 d.decompress('BZ')
2907 2907 return d
2908 2908
# Decompression engines, keyed by bundle header.  Each value maps a
# file-like object to a chunkbuffer of decompressed data; None means the
# stream is passed through untouched.
decompressors = {None: lambda fh: fh}
decompressors['_truncatedBZ'] = _makedecompressor(_bz2)
decompressors['BZ'] = _makedecompressor(lambda: bz2.BZ2Decompressor())
decompressors['GZ'] = _makedecompressor(lambda: zlib.decompressobj())
# 'UN' (uncompressed) is a historical alias for None, kept by courtesy.
decompressors['UN'] = decompressors[None]
2916 2916
# Convenient shortcut for interactive debugging sessions.
dst = debugstacktrace
@@ -1,177 +1,161 b''
1 1 #require test-repo
2 2
3 3 $ . "$TESTDIR/helpers-testrepo.sh"
4 4 $ cd "$TESTDIR"/..
5 5
6 6 $ hg files 'set:(**.py)' | sed 's|\\|/|g' | xargs python contrib/check-py3-compat.py
7 7 hgext/fsmonitor/pywatchman/__init__.py not using absolute_import
8 8 hgext/fsmonitor/pywatchman/__init__.py requires print_function
9 9 hgext/fsmonitor/pywatchman/capabilities.py not using absolute_import
10 10 hgext/fsmonitor/pywatchman/pybser.py not using absolute_import
11 11 i18n/check-translation.py not using absolute_import
12 12 setup.py not using absolute_import
13 13 tests/test-demandimport.py not using absolute_import
14 14
15 15 #if py3exe
16 16 $ hg files 'set:(**.py) - grep(pygments)' | sed 's|\\|/|g' \
17 17 > | xargs $PYTHON3 contrib/check-py3-compat.py \
18 18 > | sed 's/[0-9][0-9]*)$/*)/'
19 19 doc/hgmanpage.py: invalid syntax: invalid syntax (<unknown>, line *)
20 20 hgext/acl.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
21 21 hgext/automv.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
22 22 hgext/blackbox.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
23 23 hgext/bugzilla.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
24 24 hgext/censor.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
25 25 hgext/chgserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
26 26 hgext/children.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
27 27 hgext/churn.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
28 28 hgext/clonebundles.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
29 29 hgext/color.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
30 30 hgext/convert/bzr.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
31 31 hgext/convert/common.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
32 32 hgext/convert/convcmd.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
33 33 hgext/convert/cvs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
34 34 hgext/convert/cvsps.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
35 35 hgext/convert/darcs.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
36 36 hgext/convert/filemap.py: error importing module: <SystemError> Parent module 'hgext.convert' not loaded, cannot perform relative import (line *)
37 37 hgext/convert/git.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
38 38 hgext/convert/gnuarch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
39 39 hgext/convert/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
40 40 hgext/convert/monotone.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
41 41 hgext/convert/p4.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
42 42 hgext/convert/subversion.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
43 43 hgext/convert/transport.py: error importing module: <ImportError> No module named 'svn.client' (line *)
44 44 hgext/eol.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
45 45 hgext/extdiff.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
46 46 hgext/factotum.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
47 47 hgext/fetch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
48 48 hgext/fsmonitor/state.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
49 49 hgext/fsmonitor/watchmanclient.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
50 50 hgext/gpg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
51 51 hgext/graphlog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
52 52 hgext/hgk.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
53 53 hgext/histedit.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
54 54 hgext/journal.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
55 55 hgext/keyword.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
56 56 hgext/largefiles/basestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
57 57 hgext/largefiles/lfcommands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
58 58 hgext/largefiles/lfutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
59 59 hgext/largefiles/localstore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
60 60 hgext/largefiles/overrides.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
61 61 hgext/largefiles/proto.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
62 62 hgext/largefiles/remotestore.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
63 63 hgext/largefiles/reposetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
64 64 hgext/largefiles/storefactory.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
65 65 hgext/largefiles/uisetup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
66 66 hgext/largefiles/wirestore.py: error importing module: <SystemError> Parent module 'hgext.largefiles' not loaded, cannot perform relative import (line *)
67 67 hgext/mq.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
68 68 hgext/notify.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
69 69 hgext/pager.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
70 70 hgext/patchbomb.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
71 71 hgext/purge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
72 72 hgext/rebase.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
73 73 hgext/record.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
74 74 hgext/relink.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
75 75 hgext/schemes.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
76 76 hgext/share.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
77 77 hgext/shelve.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
78 78 hgext/strip.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
79 79 hgext/transplant.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
80 80 hgext/win32text.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
81 81 mercurial/archival.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
82 82 mercurial/bookmarks.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
83 83 mercurial/branchmap.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
84 84 mercurial/bundle2.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
85 85 mercurial/bundlerepo.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
86 86 mercurial/byterange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
87 87 mercurial/changegroup.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
88 88 mercurial/changelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
89 89 mercurial/cmdutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
90 90 mercurial/commands.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
91 91 mercurial/commandserver.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
92 92 mercurial/config.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
93 93 mercurial/context.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
94 94 mercurial/copies.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
95 95 mercurial/crecord.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
96 96 mercurial/destutil.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
97 97 mercurial/dirstate.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
98 98 mercurial/discovery.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
99 99 mercurial/dispatch.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
100 100 mercurial/encoding.py: error importing module: <TypeError> bytes expected, not str (line *)
101 101 mercurial/exchange.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
102 102 mercurial/extensions.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
103 103 mercurial/filelog.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
104 104 mercurial/filemerge.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
105 105 mercurial/fileset.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
106 106 mercurial/formatter.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
107 107 mercurial/graphmod.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
108 108 mercurial/help.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
109 109 mercurial/hg.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
110 110 mercurial/hgweb/common.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
111 111 mercurial/hgweb/hgweb_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
112 112 mercurial/hgweb/hgwebdir_mod.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
113 113 mercurial/hgweb/protocol.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
114 114 mercurial/hgweb/request.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
115 115 mercurial/hgweb/server.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
116 116 mercurial/hgweb/webcommands.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
117 117 mercurial/hgweb/webutil.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
118 118 mercurial/hgweb/wsgicgi.py: error importing module: <SystemError> Parent module 'mercurial.hgweb' not loaded, cannot perform relative import (line *)
119 119 mercurial/hook.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
120 120 mercurial/httpconnection.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
121 121 mercurial/httppeer.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
122 122 mercurial/i18n.py: error importing module: <TypeError> bytes expected, not str (line *)
123 mercurial/keepalive.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
124 mercurial/localrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
125 mercurial/lock.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
126 mercurial/mail.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
127 mercurial/manifest.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
128 mercurial/match.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
129 mercurial/mdiff.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
130 mercurial/merge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
131 mercurial/minirst.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
132 mercurial/namespaces.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
133 mercurial/obsolete.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
134 mercurial/patch.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
135 mercurial/pathutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
136 mercurial/peer.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
137 mercurial/profiling.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
138 mercurial/pushkey.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
139 mercurial/pvec.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
140 mercurial/registrar.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
141 mercurial/repair.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
142 mercurial/repoview.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
143 mercurial/revlog.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
144 mercurial/revset.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
145 mercurial/scmutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
146 mercurial/scmwindows.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
147 mercurial/similar.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
148 mercurial/simplemerge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
149 mercurial/sshpeer.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
150 mercurial/sshserver.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
151 mercurial/sslutil.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
152 mercurial/statichttprepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
153 mercurial/store.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
154 mercurial/streamclone.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
155 mercurial/subrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
156 mercurial/tagmerge.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
157 mercurial/tags.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
158 mercurial/templatefilters.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
159 mercurial/templatekw.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
160 mercurial/templater.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
161 mercurial/transaction.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
162 mercurial/ui.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
163 mercurial/unionrepo.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
164 mercurial/url.py: error importing: <TypeError> int() can't convert non-string with explicit base (error at util.py:*)
123 mercurial/keepalive.py: error importing module: <AttributeError> module 'mercurial.util' has no attribute 'httplib' (line *)
124 mercurial/localrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
125 mercurial/mail.py: error importing module: <AttributeError> module 'email' has no attribute 'Header' (line *)
126 mercurial/manifest.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
127 mercurial/merge.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
128 mercurial/namespaces.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
129 mercurial/patch.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
130 mercurial/pvec.py: error importing module: <NameError> name 'xrange' is not defined (line *)
131 mercurial/repair.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
132 mercurial/revlog.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
133 mercurial/revset.py: error importing module: <NameError> name 'xrange' is not defined (line *)
134 mercurial/scmutil.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
135 mercurial/scmwindows.py: error importing module: <ImportError> No module named 'winreg' (line *)
136 mercurial/simplemerge.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
137 mercurial/sshpeer.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
138 mercurial/sshserver.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
139 mercurial/statichttprepo.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at byterange.py:*)
140 mercurial/store.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
141 mercurial/streamclone.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
142 mercurial/subrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
143 mercurial/templatefilters.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
144 mercurial/templatekw.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
145 mercurial/templater.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
146 mercurial/ui.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
147 mercurial/unionrepo.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
148 mercurial/url.py: error importing: <AttributeError> module 'mercurial.util' has no attribute 'urlerr' (error at httpconnection.py:*)
165 149 mercurial/verify.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
166 150 mercurial/win32.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
167 151 mercurial/windows.py: error importing module: <ImportError> No module named 'msvcrt' (line *)
168 152 mercurial/wireproto.py: error importing: <TypeError> a bytes-like object is required, not 'str' (error at revset.py:*)
169 153
170 154 #endif
171 155
172 156 #if py3exe py3pygments
173 157 $ hg files 'set:(**.py) and grep(pygments)' | sed 's|\\|/|g' \
174 158 > | xargs $PYTHON3 contrib/check-py3-compat.py \
175 159 > | sed 's/[0-9][0-9]*)$/*)/'
176 160 hgext/highlight/highlight.py: error importing: <TypeError> Can't mix strings and bytes in path components (error at i18n.py:*)
177 161 #endif
General Comments 0
You need to be logged in to leave comments. Login now